parent ea3efd926e
commit 837e8b8725
@@ -749,7 +749,7 @@ impl BlockChain {
 		}

 		if hash != bc.genesis_hash() {
-			trace!("First block calculated: {:?}", hash);
+			info!("First new block calculated: {:?}", hash);
 			let mut batch = db.key_value().transaction();
 			batch.put(db::COL_EXTRA, b"first", &hash);
 			db.key_value().write(batch).expect("Low level database error when writing 'first' block. Some issue with disk?");
@@ -531,6 +531,10 @@ impl Importer {
 			.unwrap_or_else(|e| panic!("Receipt bytes should be valid: {:?}", e));
 		let _import_lock = self.import_lock.lock();

+		if unverified.header.number() >= chain.best_block_header().number() {
+			panic!("Ancient block number is higher than the best block number");
+		}
+
 		{
 			trace_time!("import_old_block");
 			// verify the block, passing the chain for updating the epoch verifier.
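The new guard in the old-block import path makes the precondition explicit: an ancient block must sit strictly below the current best block, otherwise import panics. A minimal standalone sketch of that invariant check, using plain integers instead of the crate's header types (function and parameter names here are illustrative only):

// Illustrative only: the real check compares unverified.header.number()
// against chain.best_block_header().number() and panics on violation.
fn check_ancient_block_number(ancient_number: u64, best_number: u64) {
    // An "ancient" block must lie strictly below the current best block;
    // anything at or above it indicates a logic error upstream.
    if ancient_number >= best_number {
        panic!("Ancient block number is higher than the best block number");
    }
}

fn main() {
    check_ancient_block_number(1_000, 2_000); // fine: the ancient block is older
    // check_ancient_block_number(3_000, 2_000); // would panic
}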
@@ -1201,15 +1205,27 @@ impl Client {

 	/// Get a copy of the best block's state.
 	pub fn latest_state_and_header(&self) -> (State<StateDB>, Header) {
-		let header = self.best_block_header();
-		let state = State::from_existing(
-			self.state_db.read().boxed_clone_canon(&header.hash()),
-			*header.state_root(),
-			self.engine.account_start_nonce(header.number()),
-			self.factories.clone(),
-		)
-		.expect("State root of best block header always valid.");
-		(state, header)
+		let mut nb_tries = 5;
+		// Here we take the latest block and then the latest state. If the `best` block gets pruned between those two calls, the app would panic.
+		// This should not happen often and is an edge case.
+		// Holding the best_block read lock would be more straightforward, but it can introduce overlapping locks,
+		// so instead we make up to 5 tries to get the best state; in most cases the first try succeeds.
+		while nb_tries != 0 {
+			let header = self.best_block_header();
+			match State::from_existing(
+				self.state_db.read().boxed_clone_canon(&header.hash()),
+				*header.state_root(),
+				self.engine.account_start_nonce(header.number()),
+				self.factories.clone(),
+			) {
+				Ok(ret) => return (ret, header),
+				Err(_) => {
+					warn!("Couldn't fetch state of best block header: {:?}", header);
+					nb_tries -= 1;
+				}
+			}
+		}
+		panic!("Couldn't get latest state in 5 tries");
 	}

 	/// Attempt to get a copy of a specific block's final state.
@@ -241,7 +241,7 @@ impl BlockDownloader {
 		self.last_round_start_hash = start_hash.clone();
 		self.imported_this_round = None;
 		self.round_parents = VecDeque::new();
-		self.target_hash = None;
+		//self.target_hash = None; // target_hash is only used for old (ancient) block download, and once set it should not be reset in any way.
 		self.retract_step = 1;
 	}

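The commented-out assignment documents a deliberate choice: `target_hash` marks the end point of the ancient (old) block download and must survive per-round resets, while the other round-tracking fields return to their defaults. A simplified sketch of that reset behaviour, using a made-up struct rather than the real `BlockDownloader`:

use std::collections::VecDeque;

/// Illustrative stand-in for the downloader's per-round state.
struct RoundState {
    imported_this_round: Option<usize>,
    round_parents: VecDeque<u64>,
    target_hash: Option<[u8; 32]>, // only used for ancient block download
    retract_step: u64,
}

impl RoundState {
    /// Reset the per-round bookkeeping but keep `target_hash`: once set for
    /// ancient sync it should not be cleared by starting a new round.
    fn reset_round(&mut self) {
        self.imported_this_round = None;
        self.round_parents = VecDeque::new();
        // self.target_hash is intentionally left untouched.
        self.retract_step = 1;
    }
}

fn main() {
    let mut state = RoundState {
        imported_this_round: Some(10),
        round_parents: VecDeque::from(vec![1, 2, 3]),
        target_hash: Some([0u8; 32]),
        retract_step: 8,
    };
    state.reset_round();
    assert!(state.target_hash.is_some()); // the target survives the reset
    assert_eq!(state.retract_step, 1);
}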
@@ -516,8 +516,24 @@ impl BlockDownloader {
 					self.last_imported_hash
 				);
 			} else {
-				let best = io.chain().chain_info().best_block_number;
-				let best_hash = io.chain().chain_info().best_block_hash;
+				let (best, best_hash) = match self.block_set {
+					BlockSet::NewBlocks => (
+						io.chain().chain_info().best_block_number,
+						io.chain().chain_info().best_block_hash,
+					),
+					BlockSet::OldBlocks => {
+						if let (Some(best), Some(best_hash)) = (
+							io.chain().chain_info().ancient_block_number,
+							io.chain().chain_info().ancient_block_hash,
+						) {
+							(best, best_hash) // best ancient block number and hash.
+						} else {
+							// `None` for the ancient block/hash means that all ancient blocks are already downloaded and stored in the DB.
+							self.state = State::Complete;
+							return;
+						}
+					}
+				};
 				let oldest_reorg = io.chain().pruning_info().earliest_state;
 				if self.block_set == BlockSet::NewBlocks && best > start && start < oldest_reorg
 				{
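The new `match` selects the download starting point per block set: for `NewBlocks` it is the chain's best block, for `OldBlocks` it is the current ancient block, and a missing ancient number/hash means ancient download has already finished. A condensed sketch of that selection with plain placeholder types (the real values come from `io.chain().chain_info()`):

enum BlockSet { NewBlocks, OldBlocks }

enum Pick {
    /// Block number and hash to continue from.
    Start(u64, [u8; 32]),
    /// Nothing left to download for this set.
    Complete,
}

/// Illustrative selection of the download starting point.
fn pick_start(block_set: &BlockSet, best: (u64, [u8; 32]), ancient: Option<(u64, [u8; 32])>) -> Pick {
    match block_set {
        BlockSet::NewBlocks => Pick::Start(best.0, best.1),
        BlockSet::OldBlocks => match ancient {
            // Best ancient block number and hash.
            Some((number, hash)) => Pick::Start(number, hash),
            // `None` means all ancient blocks are already in the DB.
            None => Pick::Complete,
        },
    }
}

fn main() {
    let best = (9_000_000, [1u8; 32]);
    assert!(matches!(pick_start(&BlockSet::OldBlocks, best, None), Pick::Complete));
    assert!(matches!(pick_start(&BlockSet::NewBlocks, best, None), Pick::Start(9_000_000, _)));
}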
@@ -531,7 +547,7 @@ impl BlockDownloader {
 			} else {
 				let n = start - cmp::min(self.retract_step, start);
 				if n == 0 {
-					debug_sync!(self, "Header not found, bottom line reached, resetting, last imported: {}", self.last_imported_hash);
+					info!("Header not found, bottom line reached, resetting, last imported: {}", self.last_imported_hash);
 					self.reset_to_block(&best_hash, best);
 				} else {
 					self.retract_step *= 2;
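When a requested header is not found, the downloader retracts: it steps back by `retract_step` blocks (never below block 0), doubling the step each round, and resets to the best/ancient block once the bottom is reached. A small sketch of that exponential retraction, independent of the real downloader:

use std::cmp;

/// Illustrative retraction: returns the next start block and the doubled step,
/// or `None` when the bottom is reached and the caller should reset.
fn retract(start: u64, retract_step: u64) -> Option<(u64, u64)> {
    let n = start - cmp::min(retract_step, start);
    if n == 0 {
        None // bottom line reached, reset to the best/ancient block
    } else {
        Some((n, retract_step * 2)) // step back and double the step
    }
}

fn main() {
    // Starting at block 100 with step 1 visits 99, 97, 93, 85, 69, 37, then resets.
    let (mut start, mut step) = (100, 1);
    while let Some((next, next_step)) = retract(start, step) {
        start = next;
        step = next_step;
    }
    assert_eq!(start, 37); // last start reached before the reset
}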
@@ -547,11 +563,9 @@ impl BlockDownloader {
 						);
 					}
 					None => {
-						debug_sync!(
-							self,
+						info!(
 							"Could not revert to previous block, last: {} ({})",
-							start,
-							self.last_imported_hash
+							start, self.last_imported_hash
 						);
 						self.reset_to_block(&best_hash, best);
 					}
@@ -661,7 +675,10 @@ impl BlockDownloader {

 		if self.target_hash.as_ref().map_or(false, |t| t == &h) {
 			self.state = State::Complete;
-			trace_sync!(self, "Sync target reached");
+			info!(
+				"Sync target {:?} for old blocks reached. Syncing ancient blocks finished.",
+				self.target_hash
+			);
 			return download_action;
 		}

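Completion of ancient sync is detected by comparing the imported header's hash against the stored `target_hash`. The `map_or` idiom used above, shown in isolation with a placeholder hash type rather than the real `H256`:

/// Illustrative completion check: ancient sync is finished once the imported
/// header's hash equals the configured target hash (if any).
fn reached_target(target_hash: &Option<[u8; 32]>, imported: &[u8; 32]) -> bool {
    target_hash.as_ref().map_or(false, |t| t == imported)
}

fn main() {
    let target = Some([7u8; 32]);
    assert!(reached_target(&target, &[7u8; 32])); // target reached: mark Complete
    assert!(!reached_target(&None, &[7u8; 32])); // no target set: keep downloading
}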
@@ -969,15 +969,17 @@ impl ChainSync {
 		if let (Some(ancient_block_hash), Some(ancient_block_number)) =
 			(chain.ancient_block_hash, chain.ancient_block_number)
 		{
-			trace!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number);
+			info!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number);
 			let mut downloader = BlockDownloader::new(
 				BlockSet::OldBlocks,
 				&ancient_block_hash,
 				ancient_block_number,
 			);
 			if let Some(hash) = chain.first_block_hash {
-				trace!(target: "sync", "Downloader target set to {:?}", hash);
+				trace!(target: "sync", "Downloader target for old blocks is set to {:?}", hash);
 				downloader.set_target(&hash);
 			} else {
 				trace!(target: "sync", "Downloader target could not be found");
 			}
 			self.old_blocks = Some(downloader);
 		}
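The setup above creates the old-blocks downloader from the chain's current ancient block and, when available, targets it at `first_block_hash`, the oldest block already held in the DB, so ancient download stops where stored history begins. A schematic sketch of that wiring with stub types (the real `BlockDownloader` lives in the sync crate and takes different arguments):

/// Stub downloader for illustration only.
struct Downloader { target: Option<[u8; 32]> }

impl Downloader {
    fn new(_start_hash: &[u8; 32], _start_number: u64) -> Self {
        Downloader { target: None }
    }
    fn set_target(&mut self, hash: &[u8; 32]) {
        self.target = Some(*hash);
    }
}

/// Create the old-blocks downloader only while the chain still reports an
/// ancient head, and point it at the first (oldest) block already in the DB.
fn init_old_blocks(ancient: Option<([u8; 32], u64)>, first_block_hash: Option<[u8; 32]>) -> Option<Downloader> {
    let (ancient_hash, ancient_number) = ancient?;
    let mut downloader = Downloader::new(&ancient_hash, ancient_number);
    if let Some(hash) = first_block_hash {
        downloader.set_target(&hash);
    }
    Some(downloader)
}

fn main() {
    let d = init_old_blocks(Some(([2u8; 32], 1_000)), Some([9u8; 32]));
    assert!(d.map_or(false, |dl| dl.target.is_some()));
}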
@@ -1114,10 +1116,18 @@ impl ChainSync {
 		let equal_or_higher_difficulty = peer_difficulty.map_or(true, |pd| pd >= syncing_difficulty);

 		if force || equal_or_higher_difficulty {
-			if let Some(request) = self.old_blocks.as_mut().and_then(|d| d.request_blocks(peer_id, io, num_active_peers)) {
-				SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::OldBlocks);
-				return;
-			}
+			let mut is_complete = false;
+			if let Some(old_blocks) = self.old_blocks.as_mut() {
+				if let Some(request) = old_blocks.request_blocks(peer_id, io, num_active_peers) {
+					SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::OldBlocks);
+					return;
+				}
+				is_complete = old_blocks.is_complete();
+			}
+			if is_complete { // if old_blocks is in the Complete state, set it to None.
+				self.old_blocks = None;
+			}
 		} else {
 			trace!(
 				target: "sync",
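The rewrite splits the old chained `and_then` call so the mutable borrow of `self.old_blocks` ends before the field can be cleared: the completion flag is read inside the `if let`, and only afterwards is the `Option` set to `None`. A standalone sketch of that borrow-scoping pattern with simplified types:

/// Illustrative stand-in for the old-blocks downloader.
struct OldBlocks { complete: bool }

impl OldBlocks {
    fn request_blocks(&mut self) -> Option<&'static str> {
        if self.complete { None } else { Some("request") }
    }
    fn is_complete(&self) -> bool { self.complete }
}

struct Sync { old_blocks: Option<OldBlocks> }

impl Sync {
    fn request_old_blocks(&mut self) {
        let mut is_complete = false;
        if let Some(old_blocks) = self.old_blocks.as_mut() {
            if let Some(request) = old_blocks.request_blocks() {
                println!("sending {}", request);
                return;
            }
            // Read the flag while the mutable borrow is still alive...
            is_complete = old_blocks.is_complete();
        }
        // ...and drop the finished downloader only after that borrow has ended.
        if is_complete {
            self.old_blocks = None;
        }
    }
}

fn main() {
    let mut sync = Sync { old_blocks: Some(OldBlocks { complete: true }) };
    sync.request_old_blocks();
    assert!(sync.old_blocks.is_none()); // the finished downloader is cleared
}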
@@ -1344,7 +1354,7 @@ impl ChainSync {
 			},
 			SyncState::SnapshotWaiting => match io.snapshot_service().restoration_status() {
 				RestorationStatus::Inactive => {
-					trace!(target:"sync", "Snapshot restoration is complete");
+					info!(target:"sync", "Snapshot restoration is complete");
 					self.restart(io);
 				}
 				RestorationStatus::Initializing { .. } => {
@@ -1443,7 +1453,7 @@ impl ChainSync {
 			SyncPropagator::propagate_proposed_blocks(self, io, proposed);
 		}
 		if !invalid.is_empty() {
-			trace!(target: "sync", "Bad blocks in the queue, restarting");
+			info!(target: "sync", "Bad blocks in the queue, restarting sync");
 			self.restart(io);
 		}

@@ -92,7 +92,6 @@ impl CacheSizes {

 pub struct SyncInfo {
 	last_imported_block_number: BlockNumber,
-	last_imported_old_block_number: Option<BlockNumber>,
 	num_peers: usize,
 	max_peers: u32,
 	snapshot_sync: bool,
@@ -160,7 +159,6 @@ impl InformantData for FullNodeInformantData {
 			last_imported_block_number: status
 				.last_imported_block_number
 				.unwrap_or(chain_info.best_block_number),
-			last_imported_old_block_number: status.last_imported_old_block_number,
 			num_peers: status.num_peers,
 			max_peers: status
 				.current_max_peers(*num_peers_range.start(), *num_peers_range.end()),
@@ -265,7 +263,7 @@ impl<T: InformantData> Informant<T> {
 			false => t,
 		};

-		info!(target: "import", "{} {} {} {}",
+		info!(target: "import", "{}{} {} {} {}",
 			match importing {
 				true => match snapshot_sync {
 					false => format!("Syncing {} {} {} {}+{} Qed",
@@ -301,18 +299,22 @@ impl<T: InformantData> Informant<T> {
 				},
 				false => String::new(),
 			},
+			match chain_info.ancient_block_number {
+				Some(ancient_number) => format!(" (Ancient:#{})", ancient_number),
+				None => String::new(),
+			},
 			match sync_info.as_ref() {
 				Some(ref sync_info) => format!("{}{}/{} peers",
 					match importing {
 						true => format!("{}",
 							if self.target.executes_transactions() {
-								paint(Green.bold(), format!("{:>8} ", format!("#{}", sync_info.last_imported_block_number)))
+								paint(Green.bold(), format!("{:>8} ", format!("LI:#{}", sync_info.last_imported_block_number)))
 							} else {
 								String::new()
 							}
 						),
-						false => match sync_info.last_imported_old_block_number {
-							Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("#{}", number)))),
+						false => match chain_info.ancient_block_number {
+							Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("AB:#{}", number)))),
 							None => String::new(),
 						}
 					},
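The informant line now derives the ancient-block figure from `chain_info.ancient_block_number` and labels the numbers (`LI:#…`, `AB:#…`, `(Ancient:#…)`) so the two progress counters can be told apart. A tiny sketch of the optional-suffix formatting used for the new field, without the colour codes:

/// Illustrative formatting of the ancient-block progress suffix.
fn ancient_suffix(ancient_block_number: Option<u64>) -> String {
    match ancient_block_number {
        Some(ancient_number) => format!(" (Ancient:#{})", ancient_number),
        None => String::new(),
    }
}

fn main() {
    assert_eq!(ancient_suffix(Some(1_234)), " (Ancient:#1234)");
    assert_eq!(ancient_suffix(None), "");
}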