Beta 2.5.3 (#10776)
* ethcore/res: activate the Atlantis classic hard fork at block 8,772,000 (#10766)
* Fix docker tags for publishing (#10741)
* fix: AuRa, don't add `SystemTime::now()` (#10720). This commit does the following:
  - Prevent overflow in `verify_timestamp()` by not adding `now` to the faulty timestamp that was found
  - Use an explicit `CheckedSystemTime::checked_add` to prevent potential consensus issues, because `SystemTime` is platform dependent
  - Remove the `#[cfg(not(time_checked_add))]` conditional compilation
* Update version
* Treat empty accounts the same as non-existent accounts in EIP-1052 (#10775)
* DevP2p: get the node IP address and UDP port from the socket if they are not included in the PING packet (#10705)
  - Get the node IP address and UDP port from the socket if not included in the PING packet
  - Prevent bootnodes from being added to host nodes
  - Code corrections (several rounds), docs
  - Apply suggestions from code review. Co-Authored-By: David <dvdplm@gmail.com>
* Add a way to signal shutdown to snapshotting threads (#10744)
  - Add a way to signal shutdown to snapshotting threads
  - Pass `Progress` to `fat_rlps()` so we can abort from there too
  - Check for abort in a single spot
  - Remove nightly-only weak/strong counts
  - Fix a warning; fix tests
  - Add a dummy impl to abort snapshots; add another dummy impl for `TestSnapshotService`
  - Remove debugging code
  - Return an error instead of the odd `Ok(())`; switch to `AtomicU64`
  - Revert the `.as_bytes()` change
  - Fix the build
Parent: ecbafb2390
Commit: 3fd58bdcbd
Cargo.lock (generated)
@@ -2457,7 +2457,7 @@ dependencies = [
 "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
 "jni 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "panic_hook 0.1.0",
-"parity-ethereum 2.5.2",
+"parity-ethereum 2.5.3",
 "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "tokio-current-thread 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -2487,7 +2487,7 @@ dependencies = [
 
 [[package]]
 name = "parity-ethereum"
-version = "2.5.2"
+version = "2.5.3"
 dependencies = [
 "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -2540,7 +2540,7 @@ dependencies = [
 "parity-rpc 1.12.0",
 "parity-runtime 0.1.0",
 "parity-updater 1.12.0",
-"parity-version 2.5.2",
+"parity-version 2.5.3",
 "parity-whisper 0.1.0",
 "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -2683,7 +2683,7 @@ dependencies = [
 "parity-crypto 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-runtime 0.1.0",
 "parity-updater 1.12.0",
-"parity-version 2.5.2",
+"parity-version 2.5.3",
 "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -2781,7 +2781,7 @@ dependencies = [
 "parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-hash-fetch 1.12.0",
 "parity-path 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"parity-version 2.5.2",
+"parity-version 2.5.3",
 "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -2791,7 +2791,7 @@ dependencies = [
 
 [[package]]
 name = "parity-version"
-version = "2.5.2"
+version = "2.5.3"
 dependencies = [
 "parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rlp 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2,7 +2,7 @@
 description = "Parity Ethereum client"
 name = "parity-ethereum"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "2.5.2"
+version = "2.5.3"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
@@ -12,7 +12,7 @@
 "ecip1010PauseTransition": "0x2dc6c0",
 "ecip1010ContinueTransition": "0x4c4b40",
 "ecip1017EraRounds": "0x4c4b40",
-"eip100bTransition": "0x7fffffffffffffff",
+"eip100bTransition": "0x85d9a0",
 "bombDefuseTransition": "0x5a06e0"
 }
 }

@@ -29,15 +29,15 @@
 "forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f",
 "eip150Transition": "0x2625a0",
 "eip160Transition": "0x2dc6c0",
-"eip161abcTransition": "0x7fffffffffffffff",
-"eip161dTransition": "0x7fffffffffffffff",
+"eip161abcTransition": "0x85d9a0",
+"eip161dTransition": "0x85d9a0",
 "eip155Transition": "0x2dc6c0",
 "maxCodeSize": "0x6000",
-"maxCodeSizeTransition": "0x7fffffffffffffff",
-"eip140Transition": "0x7fffffffffffffff",
-"eip211Transition": "0x7fffffffffffffff",
-"eip214Transition": "0x7fffffffffffffff",
-"eip658Transition": "0x7fffffffffffffff"
+"maxCodeSizeTransition": "0x85d9a0",
+"eip140Transition": "0x85d9a0",
+"eip211Transition": "0x85d9a0",
+"eip214Transition": "0x85d9a0",
+"eip658Transition": "0x85d9a0"
 },
 "genesis": {
 "seal": {

@@ -3905,7 +3905,7 @@
 "0x0000000000000000000000000000000000000005": {
 "builtin": {
 "name": "modexp",
-"activate_at": "0x7fffffffffffffff",
+"activate_at": "0x85d9a0",
 "pricing": {
 "modexp": {
 "divisor": 20

@@ -3916,7 +3916,7 @@
 "0x0000000000000000000000000000000000000006": {
 "builtin": {
 "name": "alt_bn128_add",
-"activate_at": "0x7fffffffffffffff",
+"activate_at": "0x85d9a0",
 "pricing": {
 "linear": {
 "base": 500,

@@ -3928,7 +3928,7 @@
 "0x0000000000000000000000000000000000000007": {
 "builtin": {
 "name": "alt_bn128_mul",
-"activate_at": "0x7fffffffffffffff",
+"activate_at": "0x85d9a0",
 "pricing": {
 "linear": {
 "base": 40000,

@@ -3940,7 +3940,7 @@
 "0x0000000000000000000000000000000000000008": {
 "builtin": {
 "name": "alt_bn128_pairing",
-"activate_at": "0x7fffffffffffffff",
+"activate_at": "0x85d9a0",
 "pricing": {
 "alt_bn128_pairing": {
 "base": 100000,
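Every transition in the spec hunks above moves from the far-future placeholder 0x7fffffffffffffff to 0x85d9a0. That hex value is 8,772,000 in decimal, the Atlantis activation block named in the commit message. A quick standalone check (illustrative only, not part of the diff):

```rust
fn main() {
    // 0x85d9a0 is the Atlantis activation block from the commit message.
    assert_eq!(0x85d9a0, 8_772_000);
    // 0x7fffffffffffffff (i64::MAX) is the "effectively never" placeholder being replaced.
    assert_eq!(0x7fff_ffff_ffff_ffffu64, i64::MAX as u64);
    println!("transitions now activate at block {}", 0x85d9a0);
}
```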
@@ -30,8 +30,10 @@ use blockchain::{BlockChainDB, BlockChainDBHandler};
 use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage};
 use ethcore::miner::Miner;
 use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
-use ethcore::snapshot::{SnapshotService as _SnapshotService, RestorationStatus};
+use ethcore::snapshot::{SnapshotService as _SnapshotService, RestorationStatus, Error as SnapshotError};
 use ethcore::spec::Spec;
+use ethcore::error::{Error as EthcoreError, ErrorKind};
+
 
 use ethcore_private_tx::{self, Importer, Signer};
 use Error;

@@ -197,6 +199,7 @@ impl ClientService {
 
 /// Shutdown the Client Service
 pub fn shutdown(&self) {
+trace!(target: "shutdown", "Shutting down Client Service");
 self.snapshot.shutdown();
 }
 }

@@ -257,7 +260,11 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
 
 let res = thread::Builder::new().name("Periodic Snapshot".into()).spawn(move || {
 if let Err(e) = snapshot.take_snapshot(&*client, num) {
-warn!("Failed to take snapshot at block #{}: {}", num, e);
+match e {
+EthcoreError(ErrorKind::Snapshot(SnapshotError::SnapshotAborted), _) => info!("Snapshot aborted"),
+_ => warn!("Failed to take snapshot at block #{}: {}", num, e),
+}
 }
 });
 
@@ -764,8 +764,8 @@ impl Client {
 liveness: AtomicBool::new(awake),
 mode: Mutex::new(config.mode.clone()),
 chain: RwLock::new(chain),
-tracedb: tracedb,
-engine: engine,
+tracedb,
+engine,
 pruning: config.pruning.clone(),
 db: RwLock::new(db.clone()),
 state_db: RwLock::new(state_db),

@@ -778,8 +778,8 @@ impl Client {
 ancient_blocks_import_lock: Default::default(),
 queue_consensus_message: IoChannelQueue::new(usize::max_value()),
 last_hashes: RwLock::new(VecDeque::new()),
-factories: factories,
-history: history,
+factories,
+history,
 on_user_defaults_change: Mutex::new(None),
 registrar_address,
 exit_handler: Mutex::new(None),

@@ -1138,7 +1138,12 @@ impl Client {
 
 /// Take a snapshot at the given block.
 /// If the ID given is "latest", this will default to 1000 blocks behind.
-pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockId, p: &snapshot::Progress) -> Result<(), EthcoreError> {
+pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(
+&self,
+writer: W,
+at: BlockId,
+p: &snapshot::Progress,
+) -> Result<(), EthcoreError> {
 let db = self.state_db.read().journal_db().boxed_clone();
 let best_block_number = self.chain_info().best_block_number;
 let block_number = self.block_number(at).ok_or_else(|| snapshot::Error::InvalidStartingBlock(at))?;

@@ -1168,8 +1173,16 @@ impl Client {
 };
 
 let processing_threads = self.config.snapshot.processing_threads;
-snapshot::take_snapshot(&*self.engine, &self.chain.read(), start_hash, db.as_hash_db(), writer, p, processing_threads)?;
+let chunker = self.engine.snapshot_components().ok_or(snapshot::Error::SnapshotsUnsupported)?;
+snapshot::take_snapshot(
+chunker,
+&self.chain.read(),
+start_hash,
+db.as_hash_db(),
+writer,
+p,
+processing_threads,
+)?;
 Ok(())
 }
 
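Several hunks in this release (`tracedb`, `engine`, `factories`, `history` above, and similar spots further down) only switch struct initialization to Rust's field-init shorthand; behavior is unchanged. A minimal illustration with hypothetical types, not taken from the diff:

```rust
struct ClientParts {
    tracedb: String,
    engine: String,
}

fn build(tracedb: String, engine: String) -> ClientParts {
    // Shorthand: equivalent to `ClientParts { tracedb: tracedb, engine: engine }`
    // when the local variable and the field share a name.
    ClientParts { tracedb, engine }
}

fn main() {
    let parts = build("tracedb".into(), "ethash".into());
    println!("{} / {}", parts.tracedb, parts.engine);
}
```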
@@ -22,7 +22,7 @@ use std::iter::FromIterator;
 use std::ops::Deref;
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::sync::{Weak, Arc};
-use std::time::{UNIX_EPOCH, SystemTime, Duration};
+use std::time::{UNIX_EPOCH, Duration};
 
 use block::*;
 use client::EngineClient;

@@ -42,14 +42,12 @@ use itertools::{self, Itertools};
 use rlp::{encode, Decodable, DecoderError, Encodable, RlpStream, Rlp};
 use ethereum_types::{H256, H520, Address, U128, U256};
 use parking_lot::{Mutex, RwLock};
+use time_utils::CheckedSystemTime;
 use types::BlockNumber;
 use types::header::{Header, ExtendedHeader};
 use types::ancestry_action::AncestryAction;
 use unexpected::{Mismatch, OutOfBounds};
 
-#[cfg(not(time_checked_add))]
-use time_utils::CheckedSystemTime;
-
 mod finality;
 
 /// `AuthorityRound` params.

@@ -578,10 +576,10 @@ fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> {
 // Returning it further won't recover the sync process.
 trace!(target: "engine", "verify_timestamp: block too early");
 
-let now = SystemTime::now();
-let found = now.checked_add(Duration::from_secs(oob.found)).ok_or(BlockError::TimestampOverflow)?;
-let max = oob.max.and_then(|m| now.checked_add(Duration::from_secs(m)));
-let min = oob.min.and_then(|m| now.checked_add(Duration::from_secs(m)));
+let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(oob.found))
+.ok_or(BlockError::TimestampOverflow)?;
+let max = oob.max.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m)));
+let min = oob.min.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m)));
 
 let new_oob = OutOfBounds { min, max, found };
 
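The hunk above is the heart of the AuRa fix: the faulty timestamp taken from the header is no longer added on top of `SystemTime::now()`; each value is instead rebuilt from `UNIX_EPOCH` with a checked addition, so a hostile timestamp yields `TimestampOverflow` rather than a platform-dependent overflow. A standalone sketch of the same idea using only the standard library (Parity's `CheckedSystemTime` helper is assumed to behave like `SystemTime::checked_add`):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Turn an untrusted u64 timestamp from a block header into a SystemTime,
// reporting failure instead of wrapping when the sum cannot be represented.
fn timestamp_to_system_time(secs: u64) -> Option<SystemTime> {
    UNIX_EPOCH.checked_add(Duration::from_secs(secs))
}

fn main() {
    // A sane timestamp converts fine.
    assert!(timestamp_to_system_time(1_560_000_000).is_some());
    // u64::MAX seconds past the epoch is not representable on typical platforms,
    // so the checked addition reports None instead of overflowing.
    println!("{:?}", timestamp_to_system_time(u64::MAX));
}
```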
@@ -24,13 +24,11 @@ use engines::clique::{VoteType, DIFF_INTURN, DIFF_NOTURN, NULL_AUTHOR, SIGNING_D
 use error::{Error, BlockError};
 use ethereum_types::{Address, H64};
 use rand::Rng;
+use time_utils::CheckedSystemTime;
 use types::BlockNumber;
 use types::header::Header;
 use unexpected::Mismatch;
 
-#[cfg(not(feature = "time_checked_add"))]
-use time_utils::CheckedSystemTime;
-
 /// Type that keeps track of the state for a given vote
 // Votes that go against the proposal aren't counted since it's equivalent to not voting
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]

@@ -268,7 +266,7 @@ impl CliqueBlockState {
 // This is a quite bad API because we must mutate both variables even when already `inturn` fails
 // That's why we can't return early and must have the `if-else` in the end
 pub fn calc_next_timestamp(&mut self, timestamp: u64, period: u64) -> Result<(), Error> {
-let inturn = UNIX_EPOCH.checked_add(Duration::from_secs(timestamp.saturating_add(period)));
+let inturn = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(timestamp.saturating_add(period)));
 
 self.next_timestamp_inturn = inturn;
 

@@ -82,12 +82,10 @@ use parking_lot::RwLock;
 use rand::Rng;
 use super::signer::EngineSigner;
 use unexpected::{Mismatch, OutOfBounds};
+use time_utils::CheckedSystemTime;
 use types::BlockNumber;
 use types::header::{ExtendedHeader, Header};
 
-#[cfg(not(feature = "time_checked_add"))]
-use time_utils::CheckedSystemTime;
-
 use self::block_state::CliqueBlockState;
 use self::params::CliqueParams;
 use self::step_service::StepService;

@@ -536,7 +534,7 @@ impl Engine<EthereumMachine> for Clique {
 
 // Don't waste time checking blocks from the future
 {
-let limit = SystemTime::now().checked_add(Duration::from_secs(self.period))
+let limit = CheckedSystemTime::checked_add(SystemTime::now(), Duration::from_secs(self.period))
 .ok_or(BlockError::TimestampOverflow)?;
 
 // This should succeed under the contraints that the system clock works

@@ -546,7 +544,7 @@ impl Engine<EthereumMachine> for Clique {
 
 let hdr = Duration::from_secs(header.timestamp());
 if hdr > limit_as_dur {
-let found = UNIX_EPOCH.checked_add(hdr).ok_or(BlockError::TimestampOverflow)?;
+let found = CheckedSystemTime::checked_add(UNIX_EPOCH, hdr).ok_or(BlockError::TimestampOverflow)?;
 
 Err(BlockError::TemporarilyInvalid(OutOfBounds {
 min: None,

@@ -657,8 +655,8 @@ impl Engine<EthereumMachine> for Clique {
 // Ensure that the block's timestamp isn't too close to it's parent
 let limit = parent.timestamp().saturating_add(self.period);
 if limit > header.timestamp() {
-let max = UNIX_EPOCH.checked_add(Duration::from_secs(header.timestamp()));
-let found = UNIX_EPOCH.checked_add(Duration::from_secs(limit))
+let max = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp()));
+let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(limit))
 .ok_or(BlockError::TimestampOverflow)?;
 
 Err(BlockError::InvalidTimestamp(OutOfBounds {
@@ -314,7 +314,11 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
 }
 
 fn extcodehash(&self, address: &Address) -> vm::Result<Option<H256>> {
+if self.state.exists_and_not_null(address)? {
 Ok(self.state.code_hash(address)?)
+} else {
+Ok(None)
+}
 }
 
 fn extcodesize(&self, address: &Address) -> vm::Result<Option<usize>> {
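The `extcodehash` hunk implements the commit-message item "Treat empty accounts the same as non-existent accounts in EIP-1052": an account that exists but is empty now reports no code hash, just like a missing account. A reduced model of that rule follows; the types and the `keccak256` parameter are illustrative, not the client's real API:

```rust
struct Account {
    nonce: u64,
    balance: u128,
    code: Vec<u8>,
}

/// EIP-1052 semantics: EXTCODEHASH of a non-existent *or* empty account
/// (no code, zero nonce, zero balance) is reported as "no hash", which the
/// EVM pushes as 0; any other account yields the hash of its code.
fn extcodehash(
    account: Option<&Account>,
    keccak256: impl Fn(&[u8]) -> [u8; 32],
) -> Option<[u8; 32]> {
    match account {
        None => None,
        Some(a) if a.nonce == 0 && a.balance == 0 && a.code.is_empty() => None,
        Some(a) => Some(keccak256(&a.code)),
    }
}

fn main() {
    let fake_keccak = |_: &[u8]| [0u8; 32];
    let empty = Account { nonce: 0, balance: 0, code: Vec::new() };
    assert_eq!(extcodehash(None, fake_keccak), None);
    assert_eq!(extcodehash(Some(&empty), fake_keccak), None);
}
```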
@@ -15,7 +15,6 @@
 // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
 
 #![warn(missing_docs, unused_extern_crates)]
-#![cfg_attr(feature = "time_checked_add", feature(time_checked_add))]
 
 //! Ethcore library
 //!

@@ -100,6 +99,7 @@ extern crate rlp;
 extern crate rustc_hex;
 extern crate serde;
 extern crate stats;
+extern crate time_utils;
 extern crate triehash_ethereum as triehash;
 extern crate unexpected;
 extern crate using_queue;

@@ -149,9 +149,6 @@ extern crate fetch;
 #[cfg(all(test, feature = "price-info"))]
 extern crate parity_runtime;
 
-#[cfg(not(time_checked_add))]
-extern crate time_utils;
-
 pub mod block;
 pub mod builtin;
 pub mod client;
@@ -24,9 +24,10 @@ use ethtrie::{TrieDB, TrieDBMut};
 use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP};
 use hash_db::HashDB;
 use rlp::{RlpStream, Rlp};
-use snapshot::Error;
+use snapshot::{Error, Progress};
 use std::collections::HashSet;
 use trie::{Trie, TrieMut};
+use std::sync::atomic::Ordering;
 
 // An empty account -- these were replaced with RLP null data for a space optimization in v1.
 const ACC_EMPTY: BasicAccount = BasicAccount {

@@ -65,8 +66,16 @@ impl CodeState {
 // walk the account's storage trie, returning a vector of RLP items containing the
 // account address hash, account properties and the storage. Each item contains at most `max_storage_items`
 // storage records split according to snapshot format definition.
-pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, first_chunk_size: usize, max_chunk_size: usize) -> Result<Vec<Bytes>, Error> {
-let db = &(acct_db as &HashDB<_,_>);
+pub fn to_fat_rlps(
+account_hash: &H256,
+acc: &BasicAccount,
+acct_db: &AccountDB,
+used_code: &mut HashSet<H256>,
+first_chunk_size: usize,
+max_chunk_size: usize,
+p: &Progress,
+) -> Result<Vec<Bytes>, Error> {
+let db = &(acct_db as &dyn HashDB<_,_>);
 let db = TrieDB::new(db, &acc.storage_root)?;
 let mut chunks = Vec::new();
 let mut db_iter = db.iter()?;

@@ -112,6 +121,10 @@ pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB,
 }
 
 loop {
+if p.abort.load(Ordering::SeqCst) {
+trace!(target: "snapshot", "to_fat_rlps: aborting snapshot");
+return Err(Error::SnapshotAborted);
+}
 match db_iter.next() {
 Some(Ok((k, v))) => {
 let pair = {
@@ -211,6 +224,7 @@ mod tests {
 use types::basic_account::BasicAccount;
 use test_helpers::get_temp_state_db;
 use snapshot::tests::helpers::fill_storage;
+use snapshot::Progress;
 
 use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak};
 use ethereum_types::{H256, Address};

@@ -236,8 +250,8 @@ mod tests {
 
 let thin_rlp = ::rlp::encode(&account);
 assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp).unwrap(), account);
-
-let fat_rlps = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
+let p = Progress::default();
+let fat_rlps = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value(), &p).unwrap();
 let fat_rlp = Rlp::new(&fat_rlps[0]).at(1).unwrap();
 assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
 }

@@ -262,7 +276,9 @@ mod tests {
 let thin_rlp = ::rlp::encode(&account);
 assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp).unwrap(), account);
 
-let fat_rlp = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
+let p = Progress::default();
+
+let fat_rlp = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), usize::max_value(), usize::max_value(), &p).unwrap();
 let fat_rlp = Rlp::new(&fat_rlp[0]).at(1).unwrap();
 assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
 }

@@ -287,7 +303,8 @@ mod tests {
 let thin_rlp = ::rlp::encode(&account);
 assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp).unwrap(), account);
 
-let fat_rlps = to_fat_rlps(&keccak(addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), 500, 1000).unwrap();
+let p = Progress::default();
+let fat_rlps = to_fat_rlps(&keccak(addr), &account, &AccountDB::new(db.as_hash_db(), &addr), &mut Default::default(), 500, 1000, &p).unwrap();
 let mut root = KECCAK_NULL_RLP;
 let mut restored_account = None;
 for rlp in fat_rlps {

@@ -319,20 +336,21 @@ mod tests {
 nonce: 50.into(),
 balance: 123456789.into(),
 storage_root: KECCAK_NULL_RLP,
-code_hash: code_hash,
+code_hash,
 };
 
 let account2 = BasicAccount {
 nonce: 400.into(),
 balance: 98765432123456789usize.into(),
 storage_root: KECCAK_NULL_RLP,
-code_hash: code_hash,
+code_hash,
 };
 
 let mut used_code = HashSet::new();
-let fat_rlp1 = to_fat_rlps(&keccak(&addr1), &account1, &AccountDB::new(db.as_hash_db(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
-let fat_rlp2 = to_fat_rlps(&keccak(&addr2), &account2, &AccountDB::new(db.as_hash_db(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
+let p1 = Progress::default();
+let p2 = Progress::default();
+let fat_rlp1 = to_fat_rlps(&keccak(&addr1), &account1, &AccountDB::new(db.as_hash_db(), &addr1), &mut used_code, usize::max_value(), usize::max_value(), &p1).unwrap();
+let fat_rlp2 = to_fat_rlps(&keccak(&addr2), &account2, &AccountDB::new(db.as_hash_db(), &addr2), &mut used_code, usize::max_value(), usize::max_value(), &p2).unwrap();
 assert_eq!(used_code.len(), 1);
 
 let fat_rlp1 = Rlp::new(&fat_rlp1[0]).at(1).unwrap();

@@ -350,6 +368,6 @@ mod tests {
 #[test]
 fn encoding_empty_acc() {
 let mut db = get_temp_state_db();
-assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &Address::default()), Rlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None));
+assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hash_db_mut(), &Address::zero()), Rlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None));
 }
 }
@@ -61,6 +61,8 @@ pub enum Error {
 ChunkTooLarge,
 /// Snapshots not supported by the consensus engine.
 SnapshotsUnsupported,
+/// Aborted snapshot
+SnapshotAborted,
 /// Bad epoch transition.
 BadEpochProof(u64),
 /// Wrong chunk format.

@@ -91,6 +93,7 @@ impl fmt::Display for Error {
 Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
 Error::ChunkTooLarge => write!(f, "Chunk size is too large."),
 Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
+Error::SnapshotAborted => write!(f, "Snapshot was aborted."),
 Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
 Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
 Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"),
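The new `SnapshotAborted` variant lets callers tell a deliberately cancelled snapshot apart from a real failure, which is exactly how the periodic-snapshot handler earlier in this diff downgrades the log from a warning to an info message. A self-contained sketch of that pattern, using a reduced error enum rather than the full ethcore error chain:

```rust
use std::fmt;

/// Reduced version of the snapshot error enum from this hunk, with just the
/// variants needed to show how an aborted snapshot is distinguished from a failure.
#[derive(Debug)]
enum SnapshotError {
    SnapshotsUnsupported,
    SnapshotAborted,
}

impl fmt::Display for SnapshotError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            SnapshotError::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
            SnapshotError::SnapshotAborted => write!(f, "Snapshot was aborted."),
        }
    }
}

fn report(result: Result<(), SnapshotError>) {
    // An abort during shutdown is expected, so it is reported quietly;
    // anything else is a real failure (mirrors the periodic-snapshot handler).
    match result {
        Ok(()) => {}
        Err(SnapshotError::SnapshotAborted) => println!("info: snapshot aborted"),
        Err(e) => eprintln!("warn: failed to take snapshot: {}", e),
    }
}

fn main() {
    report(Err(SnapshotError::SnapshotAborted));
    report(Err(SnapshotError::SnapshotsUnsupported));
}
```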
@@ -310,10 +310,7 @@ impl LooseReader {
 
 dir.pop();
 
-Ok(LooseReader {
-dir: dir,
-manifest: manifest,
-})
+Ok(LooseReader { dir, manifest })
 }
 }
 
@@ -22,7 +22,7 @@
 use std::collections::{HashMap, HashSet};
 use std::cmp;
 use std::sync::Arc;
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
 use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY};
 
 use account_db::{AccountDB, AccountDBMut};

@@ -107,7 +107,7 @@ impl Default for SnapshotConfiguration {
 fn default() -> Self {
 SnapshotConfiguration {
 no_periodic: false,
-processing_threads: ::std::cmp::max(1, num_cpus::get() / 2),
+processing_threads: ::std::cmp::max(1, num_cpus::get_physical() / 2),
 }
 }
 }

@@ -117,8 +117,9 @@ impl Default for SnapshotConfiguration {
 pub struct Progress {
 accounts: AtomicUsize,
 blocks: AtomicUsize,
-size: AtomicUsize, // Todo [rob] use Atomicu64 when it stabilizes.
+size: AtomicU64,
 done: AtomicBool,
+abort: AtomicBool,
 }
 
 impl Progress {

@@ -127,6 +128,7 @@ impl Progress {
 self.accounts.store(0, Ordering::Release);
 self.blocks.store(0, Ordering::Release);
 self.size.store(0, Ordering::Release);
+self.abort.store(false, Ordering::Release);
 
 // atomic fence here to ensure the others are written first?
 // logs might very rarely get polluted if not.

@@ -140,7 +142,7 @@ impl Progress {
 pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Acquire) }
 
 /// Get the written size of the snapshot in bytes.
-pub fn size(&self) -> usize { self.size.load(Ordering::Acquire) }
+pub fn size(&self) -> u64 { self.size.load(Ordering::Acquire) }
 
 /// Whether the snapshot is complete.
 pub fn done(&self) -> bool { self.done.load(Ordering::Acquire) }
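With the new field, `Progress` doubles as a cooperative cancellation token: the snapshot service flips `abort`, and the long-running chunking loops (such as the `to_fat_rlps` loop earlier in this diff) poll it and bail out with `Error::SnapshotAborted`. A reduced sketch of that handshake; the `abort: AtomicBool` field mirrors the diff, everything else is illustrative:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

#[derive(Default)]
struct Progress {
    abort: AtomicBool,
}

fn chunk_state(progress: &Progress) -> Result<(), &'static str> {
    for _part in 0..16 {
        if progress.abort.load(Ordering::SeqCst) {
            return Err("snapshot aborted"); // stands in for Error::SnapshotAborted
        }
        // ... chunk one subpart here ...
    }
    Ok(())
}

fn main() {
    let progress = Arc::new(Progress::default());
    let worker = {
        let progress = Arc::clone(&progress);
        thread::spawn(move || chunk_state(&progress))
    };
    // Shutdown path: signal the worker instead of waiting for it to finish.
    progress.abort.store(true, Ordering::SeqCst);
    let _ = worker.join().unwrap();
}
```

The flag is checked once per loop iteration rather than deep inside every helper, which matches the "checking for abort in a single spot" item in the commit message.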
@@ -148,27 +150,28 @@ impl Progress {
 }
 /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
 pub fn take_snapshot<W: SnapshotWriter + Send>(
-engine: &EthEngine,
+chunker: Box<dyn SnapshotComponents>,
 chain: &BlockChain,
-block_at: H256,
-state_db: &HashDB<KeccakHasher, DBValue>,
+block_hash: H256,
+state_db: &dyn HashDB<KeccakHasher, DBValue>,
 writer: W,
 p: &Progress,
 processing_threads: usize,
 ) -> Result<(), Error> {
-let start_header = chain.block_header_data(&block_at)
-.ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?;
+let start_header = chain.block_header_data(&block_hash)
+.ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_hash)))?;
 let state_root = start_header.state_root();
-let number = start_header.number();
+let block_number = start_header.number();
 
-info!("Taking snapshot starting at block {}", number);
+info!("Taking snapshot starting at block {}", block_number);
 
+let version = chunker.current_version();
 let writer = Mutex::new(writer);
-let chunker = engine.snapshot_components().ok_or(Error::SnapshotsUnsupported)?;
-let snapshot_version = chunker.current_version();
 let (state_hashes, block_hashes) = scope(|scope| -> Result<(Vec<H256>, Vec<H256>), Error> {
 let writer = &writer;
-let block_guard = scope.spawn(move || chunk_secondary(chunker, chain, block_at, writer, p));
+let block_guard = scope.spawn(move || {
+chunk_secondary(chunker, chain, block_hash, writer, p)
+});
 
 // The number of threads must be between 1 and SNAPSHOT_SUBPARTS
 assert!(processing_threads >= 1, "Cannot use less than 1 threads for creating snapshots");

@@ -183,7 +186,7 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
 
 for part in (thread_idx..SNAPSHOT_SUBPARTS).step_by(num_threads) {
 debug!(target: "snapshot", "Chunking part {} in thread {}", part, thread_idx);
-let mut hashes = chunk_state(state_db, &state_root, writer, p, Some(part))?;
+let mut hashes = chunk_state(state_db, &state_root, writer, p, Some(part), thread_idx)?;
 chunk_hashes.append(&mut hashes);
 }
 

@@ -207,12 +210,12 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
 info!(target: "snapshot", "produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());
 
 let manifest_data = ManifestData {
-version: snapshot_version,
-state_hashes: state_hashes,
-block_hashes: block_hashes,
-state_root: state_root,
-block_number: number,
-block_hash: block_at,
+version,
+state_hashes,
+block_hashes,
+state_root,
+block_number,
+block_hash,
 };
 
 writer.into_inner().finish(manifest_data)?;

@@ -228,7 +231,13 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
 /// Secondary chunks are engine-specific, but they intend to corroborate the state data
 /// in the state chunks.
 /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
-pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a BlockChain, start_hash: H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
+pub fn chunk_secondary<'a>(
+mut chunker: Box<dyn SnapshotComponents>,
+chain: &'a BlockChain,
+start_hash: H256,
+writer: &Mutex<dyn SnapshotWriter + 'a>,
+progress: &'a Progress
+) -> Result<Vec<H256>, Error> {
 let mut chunk_hashes = Vec::new();
 let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)];
 

@@ -243,7 +252,7 @@ pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a Bloc
 trace!(target: "snapshot", "wrote secondary chunk. hash: {:x}, size: {}, uncompressed size: {}",
 hash, size, raw_data.len());
 
-progress.size.fetch_add(size, Ordering::SeqCst);
+progress.size.fetch_add(size as u64, Ordering::SeqCst);
 chunk_hashes.push(hash);
 Ok(())
 };

@@ -266,8 +275,9 @@ struct StateChunker<'a> {
 rlps: Vec<Bytes>,
 cur_size: usize,
 snappy_buffer: Vec<u8>,
-writer: &'a Mutex<SnapshotWriter + 'a>,
+writer: &'a Mutex<dyn SnapshotWriter + 'a>,
 progress: &'a Progress,
+thread_idx: usize,
 }
 
 impl<'a> StateChunker<'a> {

@@ -297,10 +307,10 @@ impl<'a> StateChunker<'a> {
 let hash = keccak(&compressed);
 
 self.writer.lock().write_state_chunk(hash, compressed)?;
-trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());
+trace!(target: "snapshot", "Thread {} wrote state chunk. size: {}, uncompressed size: {}", self.thread_idx, compressed_size, raw_data.len());
 
 self.progress.accounts.fetch_add(num_entries, Ordering::SeqCst);
-self.progress.size.fetch_add(compressed_size, Ordering::SeqCst);
+self.progress.size.fetch_add(compressed_size as u64, Ordering::SeqCst);
 
 self.hashes.push(hash);
 self.cur_size = 0;

@@ -321,7 +331,14 @@ impl<'a> StateChunker<'a> {
 ///
 /// Returns a list of hashes of chunks created, or any error it may
 /// have encountered.
-pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress, part: Option<usize>) -> Result<Vec<H256>, Error> {
+pub fn chunk_state<'a>(
+db: &dyn HashDB<KeccakHasher, DBValue>,
+root: &H256,
+writer: &Mutex<dyn SnapshotWriter + 'a>,
+progress: &'a Progress,
+part: Option<usize>,
+thread_idx: usize,
+) -> Result<Vec<H256>, Error> {
 let account_trie = TrieDB::new(&db, &root)?;
 
 let mut chunker = StateChunker {

@@ -329,8 +346,9 @@ pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer:
 rlps: Vec::new(),
 cur_size: 0,
 snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
-writer: writer,
-progress: progress,
+writer,
+progress,
+thread_idx,
 };
 
 let mut used_code = HashSet::new();

@@ -365,7 +383,7 @@ pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer:
 let account = ::rlp::decode(&*account_data)?;
 let account_db = AccountDB::from_hash(db, account_key_hash);
 
-let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE)?;
+let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE, progress)?;
 for (i, fat_rlp) in fat_rlps.into_iter().enumerate() {
 if i > 0 {
 chunker.write_chunk()?;

@@ -383,7 +401,7 @@ pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer:
 
 /// Used to rebuild the state trie piece by piece.
 pub struct StateRebuilder {
-db: Box<JournalDB>,
+db: Box<dyn JournalDB>,
 state_root: H256,
 known_code: HashMap<H256, H256>, // code hashes mapped to first account with this code.
 missing_code: HashMap<H256, Vec<H256>>, // maps code hashes to lists of accounts missing that code.

@@ -393,7 +411,7 @@ pub struct StateRebuilder {
 
 impl StateRebuilder {
 /// Create a new state rebuilder to write into the given backing DB.
-pub fn new(db: Arc<KeyValueDB>, pruning: Algorithm) -> Self {
+pub fn new(db: Arc<dyn KeyValueDB>, pruning: Algorithm) -> Self {
 StateRebuilder {
 db: journaldb::new(db.clone(), pruning, ::db::COL_STATE),
 state_root: KECCAK_NULL_RLP,

@@ -411,7 +429,7 @@ impl StateRebuilder {
 let mut pairs = Vec::with_capacity(rlp.item_count()?);
 
 // initialize the pairs vector with empty values so we have slots to write into.
-pairs.resize(rlp.item_count()?, (H256::new(), Vec::new()));
+pairs.resize(rlp.item_count()?, (H256::zero(), Vec::new()));
 
 let status = rebuild_accounts(
 self.db.as_hash_db_mut(),

@@ -468,7 +486,7 @@ impl StateRebuilder {
 /// Finalize the restoration. Check for accounts missing code and make a dummy
 /// journal entry.
 /// Once all chunks have been fed, there should be nothing missing.
-pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<JournalDB>, ::error::Error> {
+pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<dyn JournalDB>, ::error::Error> {
 let missing = self.missing_code.keys().cloned().collect::<Vec<_>>();
 if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) }
 

@@ -493,7 +511,7 @@ struct RebuiltStatus {
 // rebuild a set of accounts and their storage.
 // returns a status detailing newly-loaded code and accounts missing code.
 fn rebuild_accounts(
-db: &mut HashDB<KeccakHasher, DBValue>,
+db: &mut dyn HashDB<KeccakHasher, DBValue>,
 account_fat_rlps: Rlp,
 out_chunk: &mut [(H256, Bytes)],
 known_code: &HashMap<H256, H256>,

@@ -560,7 +578,7 @@ const POW_VERIFY_RATE: f32 = 0.02;
 /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform
 /// the fullest verification possible. If not, it will take a random sample to determine whether it will
 /// do heavy or light verification.
-pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> {
+pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> {
 engine.verify_block_basic(header)?;
 
 if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
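A large share of the remaining churn in these hunks just adds the `dyn` keyword to trait-object types (`Box<dyn JournalDB>`, `&dyn HashDB<_, _>`, `&Mutex<dyn SnapshotWriter>`, `&dyn EthEngine`). Bare trait objects still compile but warn on the Rust 2018 edition; the explicit form denotes the same type. A tiny illustration with a standard-library trait:

```rust
use std::fmt::Display;

// `Box<Display>` is the older spelling and triggers a bare-trait-object warning
// on the 2018 edition; `Box<dyn Display>` is the same type, written explicitly.
fn boxed(value: impl Display + 'static) -> Box<dyn Display> {
    Box::new(value)
}

fn main() {
    println!("{}", boxed(8_772_000));
}
```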
@@ -415,7 +415,7 @@ impl Service {
 _ => break,
 }
 
-// Writting changes to DB and logging every now and then
+// Writing changes to DB and logging every now and then
 if block_number % 1_000 == 0 {
 next_db.key_value().write_buffered(batch);
 next_chain.commit();

@@ -479,16 +479,12 @@ impl Service {
 
 let guard = Guard::new(temp_dir.clone());
 let res = client.take_snapshot(writer, BlockId::Number(num), &self.progress);
-
 self.taking_snapshot.store(false, Ordering::SeqCst);
 if let Err(e) = res {
 if client.chain_info().best_block_number >= num + client.pruning_history() {
-// "Cancelled" is mincing words a bit -- what really happened
-// is that the state we were snapshotting got pruned out
-// before we could finish.
-info!("Periodic snapshot failed: block state pruned.\
-Run with a longer `--pruning-history` or with `--no-periodic-snapshot`");
-return Ok(())
+// The state we were snapshotting was pruned before we could finish.
+info!("Periodic snapshot failed: block state pruned. Run with a longer `--pruning-history` or with `--no-periodic-snapshot`");
+return Err(e);
 } else {
 return Err(e);
 }

@@ -846,14 +842,29 @@ impl SnapshotService for Service {
 }
 }
 
+fn abort_snapshot(&self) {
+if self.taking_snapshot.load(Ordering::SeqCst) {
+trace!(target: "snapshot", "Aborting snapshot – Snapshot under way");
+self.progress.abort.store(true, Ordering::SeqCst);
+}
+}
+
 fn shutdown(&self) {
+trace!(target: "snapshot", "Shut down SnapshotService");
 self.abort_restore();
+trace!(target: "snapshot", "Shut down SnapshotService - restore aborted");
+self.abort_snapshot();
+trace!(target: "snapshot", "Shut down SnapshotService - snapshot aborted");
 }
 }
 
 impl Drop for Service {
 fn drop(&mut self) {
+trace!(target: "shutdown", "Dropping Service");
 self.abort_restore();
+trace!(target: "shutdown", "Dropping Service - restore aborted");
+self.abort_snapshot();
+trace!(target: "shutdown", "Dropping Service - snapshot aborted");
 }
 }
 
@ -188,14 +188,15 @@ fn keep_ancient_blocks() {
|
|||||||
&state_root,
|
&state_root,
|
||||||
&writer,
|
&writer,
|
||||||
&Progress::default(),
|
&Progress::default(),
|
||||||
None
|
None,
|
||||||
|
0
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
|
||||||
let manifest = ::snapshot::ManifestData {
|
let manifest = ::snapshot::ManifestData {
|
||||||
version: 2,
|
version: 2,
|
||||||
state_hashes: state_hashes,
|
state_hashes,
|
||||||
state_root: state_root,
|
state_root,
|
||||||
block_hashes: block_hashes,
|
block_hashes,
|
||||||
block_number: NUM_BLOCKS,
|
block_number: NUM_BLOCKS,
|
||||||
block_hash: best_hash,
|
block_hash: best_hash,
|
||||||
};
|
};
|
||||||
|
@@ -55,7 +55,7 @@ fn snap_and_restore() {
 
 	let mut state_hashes = Vec::new();
 	for part in 0..SNAPSHOT_SUBPARTS {
-		let mut hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), Some(part)).unwrap();
+		let mut hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), Some(part), 0).unwrap();
 		state_hashes.append(&mut hashes);
 	}
 
@@ -126,8 +126,8 @@ fn get_code_from_prev_chunk() {
 	let mut make_chunk = |acc, hash| {
 		let mut db = journaldb::new_memory_db();
 		AccountDBMut::from_hash(&mut db, hash).insert(&code[..]);
-		let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
+		let p = Progress::default();
+		let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value(), &p).unwrap();
 		let mut stream = RlpStream::new_list(1);
 		stream.append_raw(&fat_rlp[0], 1);
 		stream.out()
@@ -171,13 +171,13 @@ fn checks_flag() {
 	let state_root = producer.state_root();
 	let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());
 
-	let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), None).unwrap();
+	let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), None, 0).unwrap();
 
 	writer.into_inner().finish(::snapshot::ManifestData {
 		version: 2,
-		state_hashes: state_hashes,
+		state_hashes,
 		block_hashes: Vec::new(),
-		state_root: state_root,
+		state_root,
 		block_number: 0,
 		block_hash: H256::default(),
 	}).unwrap();
@@ -55,6 +55,9 @@ pub trait SnapshotService : Sync + Send {
 	/// no-op if currently restoring.
 	fn restore_block_chunk(&self, hash: H256, chunk: Bytes);
 
+	/// Abort in-progress snapshotting if there is one.
+	fn abort_snapshot(&self);
+
 	/// Shutdown the Snapshot Service by aborting any ongoing restore
 	fn shutdown(&self);
 }
@@ -40,7 +40,6 @@ use types::{BlockNumber, header::Header};
 use types::transaction::SignedTransaction;
 use verification::queue::kind::blocks::Unverified;
 
-#[cfg(not(time_checked_add))]
 use time_utils::CheckedSystemTime;
 
 /// Preprocessed block data gathered in `verify_block_unordered` call
@@ -310,7 +309,7 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool,
 		// this will resist overflow until `year 2037`
 		let max_time = SystemTime::now() + ACCEPTABLE_DRIFT;
 		let invalid_threshold = max_time + ACCEPTABLE_DRIFT * 9;
-		let timestamp = UNIX_EPOCH.checked_add(Duration::from_secs(header.timestamp()))
+		let timestamp = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp()))
			.ok_or(BlockError::TimestampOverflow)?;
 
 		if timestamp > invalid_threshold {
@@ -334,9 +333,9 @@ fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result
 
 	if !engine.is_timestamp_valid(header.timestamp(), parent.timestamp()) {
 		let now = SystemTime::now();
-		let min = now.checked_add(Duration::from_secs(parent.timestamp().saturating_add(1)))
+		let min = CheckedSystemTime::checked_add(now, Duration::from_secs(parent.timestamp().saturating_add(1)))
			.ok_or(BlockError::TimestampOverflow)?;
-		let found = now.checked_add(Duration::from_secs(header.timestamp()))
+		let found = CheckedSystemTime::checked_add(now, Duration::from_secs(header.timestamp()))
			.ok_or(BlockError::TimestampOverflow)?;
 		return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(min), found })))
 	}
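
Both timestamp hunks above replace bare `SystemTime` arithmetic with an explicit checked add, so an attacker-controlled `u64` timestamp cannot overflow the platform-dependent `SystemTime` representation. Below is a minimal sketch of the same idea using only std's `SystemTime::checked_add` (stable since Rust 1.34); the crate's `time_utils::CheckedSystemTime` shim is not reproduced here.

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    #[derive(Debug)]
    struct TimestampOverflow;

    // Convert an untrusted block timestamp without panicking or wrapping.
    fn block_timestamp_to_system_time(secs: u64) -> Result<SystemTime, TimestampOverflow> {
        UNIX_EPOCH
            .checked_add(Duration::from_secs(secs))
            .ok_or(TimestampOverflow)
    }

    fn main() {
        // A sane timestamp converts fine; u64::max_value() overflows on common targets.
        println!("{:?}", block_timestamp_to_system_time(1_560_000_000));
        println!("{:?}", block_timestamp_to_system_time(u64::max_value()));
    }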
@@ -122,6 +122,8 @@ impl SnapshotService for TestSnapshotService {
 		self.block_restoration_chunks.lock().clear();
 	}
 
+	fn abort_snapshot(&self) {}
+
 	fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
 		if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.state_hashes.iter().any(|h| h == &hash)) {
 			self.state_restoration_chunks.lock().insert(hash, chunk);
@@ -932,7 +932,7 @@ impl Configuration {
 			no_periodic: self.args.flag_no_periodic_snapshot,
 			processing_threads: match self.args.arg_snapshot_threads {
 				Some(threads) if threads > 0 => threads,
-				_ => ::std::cmp::max(1, num_cpus::get() / 2),
+				_ => ::std::cmp::max(1, num_cpus::get_physical() / 2),
 			},
 		};
 
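
The default above now counts physical cores rather than logical ones, so SMT siblings no longer inflate the snapshot worker count. A one-line sketch of that default, assuming the `num_cpus` crate is available as a dependency:

    // Half the physical cores, but never fewer than one snapshotting thread.
    fn default_snapshot_threads() -> usize {
        std::cmp::max(1, num_cpus::get_physical() / 2)
    }

    fn main() {
        println!("snapshot threads: {}", default_snapshot_threads());
    }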
@@ -15,7 +15,6 @@
 // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
 
 //! Ethcore client application.
 
 #![warn(missing_docs)]
 
 extern crate ansi_term;
@@ -893,17 +893,27 @@ impl RunningClient {
 			// Create a weak reference to the client so that we can wait on shutdown
 			// until it is dropped
 			let weak_client = Arc::downgrade(&client);
-			// Shutdown and drop the ServiceClient
+			// Shutdown and drop the ClientService
 			client_service.shutdown();
+			trace!(target: "shutdown", "ClientService shut down");
 			drop(client_service);
+			trace!(target: "shutdown", "ClientService dropped");
 			// drop this stuff as soon as exit detected.
 			drop(rpc);
+			trace!(target: "shutdown", "RPC dropped");
 			drop(keep_alive);
+			trace!(target: "shutdown", "KeepAlive dropped");
 			// to make sure timer does not spawn requests while shutdown is in progress
 			informant.shutdown();
+			trace!(target: "shutdown", "Informant shut down");
 			// just Arc is dropping here, to allow other reference release in its default time
 			drop(informant);
+			trace!(target: "shutdown", "Informant dropped");
 			drop(client);
+			trace!(target: "shutdown", "Client dropped");
+			// This may help when debugging ref cycles. Requires nightly-only `#![feature(weak_counts)]`
+			// trace!(target: "shutdown", "Waiting for refs to Client to shutdown, strong_count={:?}, weak_count={:?}", weak_client.strong_count(), weak_client.weak_count());
+			trace!(target: "shutdown", "Waiting for refs to Client to shutdown");
 			wait_for_drop(weak_client);
 		}
 	}
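
The added `target: "shutdown"` traces bracket each teardown step, so a hung shutdown can be localised from the last message that made it out. A compact sketch of the pattern with the `log` crate and placeholder component types (not the actual Parity types):

    use log::trace;

    struct ClientService;
    struct Rpc;

    // Tear components down in a fixed order, tracing after each step.
    fn staged_shutdown(client_service: ClientService, rpc: Rpc) {
        drop(client_service);
        trace!(target: "shutdown", "ClientService dropped");
        drop(rpc);
        trace!(target: "shutdown", "RPC dropped");
    }

    fn main() {
        staged_shutdown(ClientService, Rpc);
    }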
@@ -937,24 +947,30 @@ fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &Datab
 }
 
 fn wait_for_drop<T>(w: Weak<T>) {
-	let sleep_duration = Duration::from_secs(1);
-	let warn_timeout = Duration::from_secs(60);
-	let max_timeout = Duration::from_secs(300);
+	const SLEEP_DURATION: Duration = Duration::from_secs(1);
+	const WARN_TIMEOUT: Duration = Duration::from_secs(60);
+	const MAX_TIMEOUT: Duration = Duration::from_secs(300);
 
 	let instant = Instant::now();
 	let mut warned = false;
 
-	while instant.elapsed() < max_timeout {
+	while instant.elapsed() < MAX_TIMEOUT {
 		if w.upgrade().is_none() {
 			return;
 		}
 
-		if !warned && instant.elapsed() > warn_timeout {
+		if !warned && instant.elapsed() > WARN_TIMEOUT {
 			warned = true;
 			warn!("Shutdown is taking longer than expected.");
 		}
 
-		thread::sleep(sleep_duration);
+		thread::sleep(SLEEP_DURATION);
+
+		// When debugging shutdown issues on a nightly build it can help to enable this with the
+		// `#![feature(weak_counts)]` added to lib.rs (TODO: enable when
+		// https://github.com/rust-lang/rust/issues/57977 is stable)
+		// trace!(target: "shutdown", "Waiting for client to drop, strong_count={:?}, weak_count={:?}", w.strong_count(), w.weak_count());
+		trace!(target: "shutdown", "Waiting for client to drop");
 	}
 
 	warn!("Shutdown timeout reached, exiting uncleanly.");
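
`wait_for_drop` itself is a small, self-contained pattern: hold only a `Weak` handle, poll until the last strong reference elsewhere is gone, warn once, and give up after a hard timeout rather than hanging shutdown forever. A standalone sketch with shortened timeouts, using only std:

    use std::sync::{Arc, Weak};
    use std::thread;
    use std::time::{Duration, Instant};

    fn wait_for_drop<T>(w: Weak<T>) {
        const SLEEP: Duration = Duration::from_millis(10);
        const WARN_AFTER: Duration = Duration::from_millis(100);
        const GIVE_UP_AFTER: Duration = Duration::from_millis(500);

        let started = Instant::now();
        let mut warned = false;
        while started.elapsed() < GIVE_UP_AFTER {
            if w.upgrade().is_none() {
                return; // every strong reference is gone
            }
            if !warned && started.elapsed() > WARN_AFTER {
                warned = true;
                eprintln!("shutdown is taking longer than expected");
            }
            thread::sleep(SLEEP);
        }
        eprintln!("shutdown timeout reached, exiting uncleanly");
    }

    fn main() {
        let client = Arc::new(());
        let weak = Arc::downgrade(&client);
        let t = thread::spawn(move || {
            thread::sleep(Duration::from_millis(50));
            drop(client); // simulate the last user releasing its handle
        });
        wait_for_drop(weak);
        t.join().unwrap();
    }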
@@ -261,7 +261,7 @@ impl SnapshotCommand {
 			let cur_size = p.size();
 			if cur_size != last_size {
 				last_size = cur_size;
-				let bytes = ::informant::format_bytes(p.size());
+				let bytes = ::informant::format_bytes(cur_size as usize);
 				info!("Snapshot: {} accounts {} blocks {}", p.accounts(), p.blocks(), bytes);
 			}
 
@@ -48,6 +48,7 @@ impl SnapshotService for TestSnapshotService {
 	fn status(&self) -> RestorationStatus { self.status.lock().clone() }
 	fn begin_restore(&self, _manifest: ManifestData) { }
 	fn abort_restore(&self) { }
+	fn abort_snapshot(&self) {}
 	fn restore_state_chunk(&self, _hash: H256, _chunk: Bytes) { }
 	fn restore_block_chunk(&self, _hash: H256, _chunk: Bytes) { }
 	fn shutdown(&self) { }
@@ -3,7 +3,9 @@
 set -e # fail on any error
 
 VERSION=$(cat ./tools/VERSION)
+TRACK=$(cat ./tools/TRACK)
 echo "Parity Ethereum version = ${VERSION}"
+echo "Parity Ethereum track = ${TRACK}"
 
 test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" \
     || ( echo "no docker credentials provided"; exit 1 )
@@ -44,6 +46,14 @@ case "${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}" in
       --file tools/Dockerfile .;
     docker push "parity/parity:${VERSION}-${CI_COMMIT_REF_NAME}";
     docker push "parity/parity:stable";;
+  v[0-9]*.[0-9]*)
+    echo "Docker TAG - 'parity/parity:${VERSION}-${TRACK}'"
+    docker build --no-cache \
+      --build-arg VCS_REF="${CI_COMMIT_SHA}" \
+      --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" \
+      --tag "parity/parity:${VERSION}-${TRACK}" \
+      --file tools/Dockerfile .;
+    docker push "parity/parity:${VERSION}-${TRACK}";;
   *)
     echo "Docker TAG - 'parity/parity:${VERSION}-${CI_COMMIT_REF_NAME}'"
     docker build --no-cache \
@@ -168,7 +168,6 @@ pub struct Discovery<'a> {
 	discovery_id: NodeId,
 	discovery_nodes: HashSet<NodeId>,
 	node_buckets: Vec<NodeBucket>,
 
 	// Sometimes we don't want to add nodes to the NodeTable, but still want to
 	// keep track of them to avoid excessive pinging (happens when an unknown node sends
 	// a discovery request to us -- the node might be on a different net).
@@ -257,7 +256,7 @@ impl<'a> Discovery<'a> {
 			Ok(()) => None,
 			Err(BucketError::Ourselves) => None,
 			Err(BucketError::NotInTheBucket{node_entry, bucket_distance}) => Some((node_entry, bucket_distance))
-		}.map(|(node_entry, bucket_distance)| {
+		}.and_then(|(node_entry, bucket_distance)| {
 			trace!(target: "discovery", "Adding a new node {:?} into our bucket {}", &node_entry, bucket_distance);
 
 			let mut added = HashMap::with_capacity(1);
@@ -265,7 +264,7 @@ impl<'a> Discovery<'a> {
 
 			let node_to_ping = {
 				let bucket = &mut self.node_buckets[bucket_distance];
-				bucket.nodes.push_front(BucketEntry::new(node_entry));
+				bucket.nodes.push_front(BucketEntry::new(node_entry.clone()));
 				if bucket.nodes.len() > BUCKET_SIZE {
 					select_bucket_ping(bucket.nodes.iter())
 				} else {
@@ -275,7 +274,12 @@ impl<'a> Discovery<'a> {
 			if let Some(node) = node_to_ping {
 				self.try_ping(node, PingReason::Default);
 			};
-			TableUpdates{added, removed: HashSet::new()}
+
+			if node_entry.endpoint.is_valid_sync_node() {
+				Some(TableUpdates { added, removed: HashSet::new() })
+			} else {
+				None
+			}
 		})
 	}
 
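
The `.map` to `.and_then` switch above lets the closure decide whether any table updates are produced at all: it now returns an `Option` itself and `and_then` flattens it, where `.map` would have yielded a nested `Option<Option<_>>`. A tiny illustrative sketch:

    fn updates_if_valid(valid_sync_node: bool) -> Option<&'static str> {
        // With `.map` this would become Option<Option<&str>>; `.and_then` flattens it.
        Some(valid_sync_node).and_then(|valid| {
            if valid { Some("TableUpdates") } else { None }
        })
    }

    fn main() {
        assert_eq!(updates_if_valid(true), Some("TableUpdates"));
        assert_eq!(updates_if_valid(false), None);
    }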
@@ -518,7 +522,18 @@ impl<'a> Discovery<'a> {
 
 	fn on_ping(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr, echo_hash: &[u8]) -> Result<Option<TableUpdates>, Error> {
 		trace!(target: "discovery", "Got Ping from {:?}", &from);
-		let ping_from = NodeEndpoint::from_rlp(&rlp.at(1)?)?;
+		let ping_from = if let Ok(node_endpoint) = NodeEndpoint::from_rlp(&rlp.at(1)?) {
+			node_endpoint
+		} else {
+			let mut address = from.clone();
+			// address here is the node's tcp port. If we are unable to get the `NodeEndpoint` from the `ping_from`
+			// rlp field then this is most likely a BootNode, set the tcp port to 0 because it can not be used for syncing.
+			address.set_port(0);
+			NodeEndpoint {
+				address,
+				udp_port: from.port()
+			}
+		};
 		let ping_to = NodeEndpoint::from_rlp(&rlp.at(2)?)?;
 		let timestamp: u64 = rlp.val_at(3)?;
 		self.check_timestamp(timestamp)?;
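
When the PING payload carries no parseable endpoint, the code now falls back to the UDP socket the packet arrived on and zeroes the TCP port, since that port is unknown and cannot be used for sync. A std-only sketch of that fallback with a hypothetical `Endpoint` type (the real `NodeEndpoint` lives in the devp2p crate):

    use std::net::SocketAddr;

    #[derive(Debug)]
    struct Endpoint {
        address: SocketAddr, // TCP address used for sync
        udp_port: u16,       // UDP port used for discovery
    }

    fn endpoint_from_socket(from: SocketAddr) -> Endpoint {
        let mut address = from;
        address.set_port(0); // TCP port unknown; 0 marks it as unusable for sync
        Endpoint { address, udp_port: from.port() }
    }

    fn main() {
        let from: SocketAddr = "203.0.113.7:30303".parse().unwrap();
        println!("{:?}", endpoint_from_socket(from));
    }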
@@ -540,7 +555,7 @@ impl<'a> Discovery<'a> {
 		self.send_packet(PACKET_PONG, from, &response.drain())?;
 
 		let entry = NodeEntry { id: *node_id, endpoint: pong_to.clone() };
-		if !entry.endpoint.is_valid() {
+		if !entry.endpoint.is_valid_discovery_node() {
 			debug!(target: "discovery", "Got bad address: {:?}", entry);
 		} else if !self.is_allowed(&entry) {
 			debug!(target: "discovery", "Address not allowed: {:?}", entry);
@@ -728,7 +743,7 @@ impl<'a> Discovery<'a> {
 		trace!(target: "discovery", "Got {} Neighbours from {:?}", results_count, &from);
 		for r in rlp.at(0)?.iter() {
 			let endpoint = NodeEndpoint::from_rlp(&r)?;
-			if !endpoint.is_valid() {
+			if !endpoint.is_valid_discovery_node() {
 				debug!(target: "discovery", "Bad address: {:?}", endpoint);
 				continue;
 			}
@@ -103,10 +103,16 @@ impl NodeEndpoint {
 		self.to_rlp(rlp);
 	}
 
-	/// Validates that the port is not 0 and address IP is specified
-	pub fn is_valid(&self) -> bool {
-		self.udp_port != 0 && self.address.port() != 0 &&
-		match self.address {
+	/// Validates that the tcp port is not 0 and that the node is a valid discovery node (i.e. `is_valid_discovery_node()` is true).
+	/// Sync happens over tcp.
+	pub fn is_valid_sync_node(&self) -> bool {
+		self.is_valid_discovery_node() && self.address.port() != 0
+	}
+
+	/// Validates that the udp port is not 0 and address IP is specified.
+	/// Peer discovery happens over udp.
+	pub fn is_valid_discovery_node(&self) -> bool {
+		self.udp_port != 0 && match self.address {
 			SocketAddr::V4(a) => !a.ip().is_unspecified(),
 			SocketAddr::V6(a) => !a.ip().is_unspecified()
 		}
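
The split into `is_valid_discovery_node` and `is_valid_sync_node` encodes that discovery only needs a specified IP and a non-zero UDP port, while sync additionally needs a non-zero TCP port, so a node learned from a PING with TCP port 0 can still be pinged but will not be offered for syncing. A std-only sketch with a hypothetical `Endpoint` type mirroring the diff:

    use std::net::SocketAddr;

    struct Endpoint {
        address: SocketAddr, // TCP address
        udp_port: u16,
    }

    impl Endpoint {
        fn is_valid_discovery_node(&self) -> bool {
            self.udp_port != 0 && match self.address {
                SocketAddr::V4(a) => !a.ip().is_unspecified(),
                SocketAddr::V6(a) => !a.ip().is_unspecified(),
            }
        }

        fn is_valid_sync_node(&self) -> bool {
            self.is_valid_discovery_node() && self.address.port() != 0
        }
    }

    fn main() {
        // A bootnode-style endpoint: usable for discovery, not for sync.
        let boot = Endpoint { address: "203.0.113.7:0".parse().unwrap(), udp_port: 30303 };
        assert!(boot.is_valid_discovery_node());
        assert!(!boot.is_valid_sync_node());
    }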
@@ -3,7 +3,7 @@
 [package]
 name = "parity-version"
 # NOTE: this value is used for Parity Ethereum version string (via env CARGO_PKG_VERSION)
-version = "2.5.2"
+version = "2.5.3"
 authors = ["Parity Technologies <admin@parity.io>"]
 build = "build.rs"