Merge bugfixes from master to beta (#1605)

* Attempt to fix blockchain DB sync

* Fix bloomchain on blockchain repair

* Make sure reserved peers are in the node table

* Fixed #1606 (#1615)
This commit is contained in:
Arkadiy Paronyan 2016-07-14 12:52:07 +02:00 committed by Gav Wood
parent 69847e3b8b
commit ed5d797662
7 changed files with 46 additions and 17 deletions

View File

@ -30,7 +30,7 @@ use blockchain::best_block::BestBlock;
use types::tree_route::TreeRoute; use types::tree_route::TreeRoute;
use blockchain::update::ExtrasUpdate; use blockchain::update::ExtrasUpdate;
use blockchain::{CacheSize, ImportRoute, Config}; use blockchain::{CacheSize, ImportRoute, Config};
use db::{Writable, Readable, CacheUpdatePolicy}; use db::{Writable, Readable, CacheUpdatePolicy, Key};
const LOG_BLOOMS_LEVELS: usize = 3; const LOG_BLOOMS_LEVELS: usize = 3;
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16; const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
@ -295,7 +295,34 @@ impl BlockChain {
// load best block // load best block
let best_block_hash = match bc.extras_db.get(b"best").unwrap() { let best_block_hash = match bc.extras_db.get(b"best").unwrap() {
Some(best) => H256::from_slice(&best), Some(best) => {
let best = H256::from_slice(&best);
let mut b = best.clone();
let mut removed = 0;
let mut best_num = 0;
while !bc.blocks_db.get(&b).unwrap().is_some() {
// track back to the best block we have in the blocks database
let extras: BlockDetails = bc.extras_db.read(&b).unwrap();
type DetailsKey = Key<BlockDetails, Target=H264>;
bc.extras_db.delete(&(DetailsKey::key(&b))).unwrap();
b = extras.parent;
best_num = extras.number;
removed += 1;
}
if b != best {
let batch = DBTransaction::new();
let range = (best_num + 1) as bc::Number .. (best_num + removed) as bc::Number;
let chain = bc::group::BloomGroupChain::new(bc.blooms_config, &bc);
let changes = chain.replace(&range, vec![]);
for (k, v) in changes.into_iter() {
batch.write(&LogGroupPosition::from(k), &BloomGroup::from(v));
}
batch.put(b"best", &b).unwrap();
bc.extras_db.write(batch).unwrap();
info!("Restored mismatched best block. Was: {}, new: {}", best.hex(), b.hex());
}
b
}
None => { None => {
// best block does not exist // best block does not exist
// we need to insert genesis into the cache // we need to insert genesis into the cache

View File

@ -20,7 +20,7 @@ use util::HeapSizeOf;
use basic_types::LogBloom; use basic_types::LogBloom;
/// Helper structure representing bloom of the trace. /// Helper structure representing bloom of the trace.
#[derive(Clone)] #[derive(Debug, Clone)]
pub struct Bloom(LogBloom); pub struct Bloom(LogBloom);
impl From<LogBloom> for Bloom { impl From<LogBloom> for Bloom {

View File

@ -20,7 +20,7 @@ use util::HeapSizeOf;
use super::Bloom; use super::Bloom;
/// Represents group of X consecutive blooms. /// Represents group of X consecutive blooms.
#[derive(Clone)] #[derive(Debug, Clone)]
pub struct BloomGroup { pub struct BloomGroup {
blooms: Vec<Bloom>, blooms: Vec<Bloom>,
} }

View File

@ -15,7 +15,6 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trace database. //! Trace database.
use std::ptr;
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{RwLock, Arc}; use std::sync::{RwLock, Arc};
@ -47,9 +46,7 @@ impl Key<BlockTraces> for H256 {
fn key(&self) -> H264 { fn key(&self) -> H264 {
let mut result = H264::default(); let mut result = H264::default();
result[0] = TraceDBIndex::BlockTraces as u8; result[0] = TraceDBIndex::BlockTraces as u8;
unsafe { result[1..33].copy_from_slice(self);
ptr::copy(self.as_ptr(), result.as_mut_ptr().offset(1), 32);
}
result result
} }
} }
@ -84,9 +81,9 @@ impl Key<blooms::BloomGroup> for TraceGroupPosition {
result[0] = TraceDBIndex::BloomGroups as u8; result[0] = TraceDBIndex::BloomGroups as u8;
result[1] = self.0.level; result[1] = self.0.level;
result[2] = self.0.index as u8; result[2] = self.0.index as u8;
result[3] = (self.0.index << 8) as u8; result[3] = (self.0.index >> 8) as u8;
result[4] = (self.0.index << 16) as u8; result[4] = (self.0.index >> 16) as u8;
result[5] = (self.0.index << 24) as u8; result[5] = (self.0.index >> 24) as u8;
TraceGroupKey(result) TraceGroupKey(result)
} }
} }

View File

@ -30,7 +30,7 @@ use std::collections::VecDeque;
/// Addresses filter. /// Addresses filter.
/// ///
/// Used to create bloom possibilities and match filters. /// Used to create bloom possibilities and match filters.
#[derive(Binary)] #[derive(Debug, Binary)]
pub struct AddressesFilter { pub struct AddressesFilter {
list: Vec<Address> list: Vec<Address>
} }
@ -76,7 +76,7 @@ impl AddressesFilter {
} }
} }
#[derive(Binary)] #[derive(Debug, Binary)]
/// Traces filter. /// Traces filter.
pub struct Filter { pub struct Filter {
/// Block range. /// Block range.

View File

@ -435,6 +435,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
let entry = NodeEntry { endpoint: n.endpoint.clone(), id: n.id.clone() }; let entry = NodeEntry { endpoint: n.endpoint.clone(), id: n.id.clone() };
self.reserved_nodes.write().unwrap().insert(n.id.clone()); self.reserved_nodes.write().unwrap().insert(n.id.clone());
self.nodes.write().unwrap().add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
if let Some(ref mut discovery) = *self.discovery.lock().unwrap() { if let Some(ref mut discovery) = *self.discovery.lock().unwrap() {
discovery.add_node(entry); discovery.add_node(entry);
@ -758,7 +759,9 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e); trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
if let UtilError::Network(NetworkError::Disconnect(DisconnectReason::IncompatibleProtocol)) = e { if let UtilError::Network(NetworkError::Disconnect(DisconnectReason::IncompatibleProtocol)) = e {
if let Some(id) = s.id() { if let Some(id) = s.id() {
self.nodes.write().unwrap().mark_as_useless(id); if !self.reserved_nodes.read().unwrap().contains(id) {
self.nodes.write().unwrap().mark_as_useless(id);
}
} }
} }
kill = true; kill = true;
@ -892,7 +895,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
trace!(target: "network", "Removed from node table: {}", i); trace!(target: "network", "Removed from node table: {}", i);
self.kill_connection(i, io, false); self.kill_connection(i, io, false);
} }
self.nodes.write().unwrap().update(node_changes); self.nodes.write().unwrap().update(node_changes, &*self.reserved_nodes.read().unwrap());
} }
} }

View File

@ -236,13 +236,15 @@ impl NodeTable {
} }
/// Apply table changes coming from discovery /// Apply table changes coming from discovery
pub fn update(&mut self, mut update: TableUpdates) { pub fn update(&mut self, mut update: TableUpdates, reserved: &HashSet<NodeId>) {
for (_, node) in update.added.drain() { for (_, node) in update.added.drain() {
let mut entry = self.nodes.entry(node.id.clone()).or_insert_with(|| Node::new(node.id.clone(), node.endpoint.clone())); let mut entry = self.nodes.entry(node.id.clone()).or_insert_with(|| Node::new(node.id.clone(), node.endpoint.clone()));
entry.endpoint = node.endpoint; entry.endpoint = node.endpoint;
} }
for r in update.removed { for r in update.removed {
self.nodes.remove(&r); if !reserved.contains(&r) {
self.nodes.remove(&r);
}
} }
} }