Merge branch 'nvolf' into block-propagation

Nikolay Volf 2016-02-08 01:40:15 +03:00
commit 70d59e4a52
5 changed files with 242 additions and 35 deletions

View File

@@ -207,6 +207,8 @@ impl BlockQueue {
 		let mut verification = self.verification.lock().unwrap();
 		verification.unverified.clear();
 		verification.verifying.clear();
+		verification.verified.clear();
+		self.processing.write().unwrap().clear();
 	}
 
 	/// Wait for queue to be empty

View File

@@ -434,12 +434,11 @@ impl ChainSync {
 		let block_rlp = try!(r.at(0));
 		let header_rlp = try!(block_rlp.at(0));
 		let h = header_rlp.as_raw().sha3();
 
 		trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
-		let header_view = HeaderView::new(header_rlp.as_raw());
+		let header: BlockHeader = try!(header_rlp.as_val());
 		let mut unknown = false;
 		// TODO: Decompose block and add to self.headers and self.bodies instead
-		if header_view.number() == From::from(self.current_base_block() + 1) {
+		if header.number == From::from(self.current_base_block() + 1) {
 			match io.chain().import_block(block_rlp.as_raw().to_vec()) {
 				Err(ImportError::AlreadyInChain) => {
 					trace!(target: "sync", "New block already in chain {:?}", h);
@@ -472,7 +471,7 @@ impl ChainSync {
 			trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
 			{
 				let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer");
-				peer.latest = header_view.sha3();
+				peer.latest = header.hash();
 			}
 			self.sync_peer(io, peer_id, true);
 		}
@@ -1058,7 +1057,15 @@ impl ChainSync {
 				match route.blocks.len() {
 					0 => None,
 					_ => {
-						Some(rlp::encode(&route.blocks).to_vec())
+						let mut rlp_stream = RlpStream::new_list(route.blocks.len());
+						for block_hash in route.blocks {
+							let mut hash_rlp = RlpStream::new_list(2);
+							let difficulty = chain.block_total_difficulty(&block_hash).expect("Malformed block without a difficulty on the chain!");
+							hash_rlp.append(&block_hash);
+							hash_rlp.append(&difficulty);
+							rlp_stream.append_raw(&hash_rlp.out(), 1);
+						}
+						Some(rlp_stream.out())
 					}
 				}
 			},
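Note: each NewBlockHashes entry is now a two-item list [hash, total_difficulty] rather than a bare hash. A minimal receiver-side sketch (illustrative only, not part of this commit; it assumes the same UntrustedRlp API used elsewhere in this diff):

	// Hypothetical decoder for the payload built by create_new_hashes_rlp above.
	fn decode_new_hashes(r: &UntrustedRlp) -> Result<Vec<(H256, U256)>, DecoderError> {
		let mut pairs = Vec::new();
		for entry in r.iter() {
			let hash: H256 = try!(entry.val_at(0));       // 32-byte block hash
			let difficulty: U256 = try!(entry.val_at(1)); // total difficulty at that block
			pairs.push((hash, difficulty));
		}
		Ok(pairs)
	}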
@@ -1068,7 +1075,10 @@ impl ChainSync {
 
 	/// creates latest block rlp for the given client
 	fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
-		chain.block(&chain.chain_info().best_block_hash).expect("Creating latest block when there is none")
+		let mut rlp_stream = RlpStream::new_list(2);
+		rlp_stream.append_raw(&chain.block(&chain.chain_info().best_block_hash).expect("Creating latest block when there is none"), 1);
+		rlp_stream.append(&chain.chain_info().total_difficulty);
+		rlp_stream.out()
 	}
 
 	/// returns peer ids that have less blocks than our chain
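Note: the NewBlock payload likewise becomes [block, total_difficulty]. A hypothetical receiver-side split (illustrative only; on_peer_new_block above still takes the block itself from r.at(0)):

	// Sketch: separating the block RLP from the trailing total difficulty
	// appended by create_latest_block_rlp.
	fn decode_new_block(r: &UntrustedRlp) -> Result<(Bytes, U256), DecoderError> {
		let block = try!(r.at(0)).as_raw().to_vec();    // the full block RLP, as before
		let total_difficulty: U256 = try!(r.val_at(1)); // new trailing field
		Ok((block, total_difficulty))
	}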
@@ -1159,7 +1169,47 @@ mod tests {
 	use super::*;
 	use util::*;
 	use super::{PeerInfo, PeerAsking};
-	use ethcore::header::{BlockNumber};
+	use ethcore::header::*;
+	use ethcore::client::*;
+
+	fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes {
+		let mut header = Header::new();
+		header.gas_limit = x!(0);
+		header.difficulty = x!(order * 100);
+		header.timestamp = (order * 10) as u64;
+		header.number = order as u64;
+		header.parent_hash = parent_hash;
+		header.state_root = H256::zero();
+
+		let mut rlp = RlpStream::new_list(3);
+		rlp.append(&header);
+		rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
+		rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
+		rlp.out()
+	}
+
+	fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes {
+		let mut rlp = RlpStream::new_list(1);
+		rlp.append_raw(&get_dummy_block(order, parent_hash), 1);
+		let difficulty: U256 = x!(100 * order);
+		rlp.append(&difficulty);
+		rlp.out()
+	}
+
+	fn get_dummy_hashes() -> Bytes {
+		let mut rlp = RlpStream::new_list(5);
+		for _ in 0..5 {
+			let mut hash_d_rlp = RlpStream::new_list(2);
+			let hash: H256 = H256::from(0u64);
+			let diff: U256 = U256::from(1u64);
+			hash_d_rlp.append(&hash);
+			hash_d_rlp.append(&diff);
+			rlp.append_raw(&hash_d_rlp.out(), 1);
+		}
+		rlp.out()
+	}
+
 	#[test]
 	fn return_receipts_empty() {
@@ -1271,8 +1321,8 @@ mod tests {
 		assert!(rlp.is_none());
 
 		let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap();
-		// size of three rlp encoded hash
-		assert_eq!(101, rlp.len());
+		// size of three rlp encoded hash-difficulty
+		assert_eq!(107, rlp.len());
 	}
 
 	#[test]
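Note: the 101 -> 107 bump is consistent. Before, each of the three entries was a bare 32-byte hash (33 bytes with its RLP string prefix): 3 x 33 = 99, plus a 2-byte outer list header = 101. Now each entry is a two-item list of the hash plus a total difficulty, which TestBlockChainClient reports as zero (see the helpers change below) and which therefore encodes as the single byte 0x80: 1-byte pair-list header + 33 + 1 = 35 per entry, 3 x 35 = 105, plus the 2-byte outer header = 107.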
@@ -1310,4 +1360,118 @@ mod tests {
 		// NEW_BLOCK_PACKET
 		assert_eq!(0x07, io.queue[0].packet_id);
 	}
+
+	#[test]
+	fn handles_peer_new_block_mallformed() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(10, false);
+
+		let block_data = get_dummy_block(11, client.chain_info().best_block_hash);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let block = UntrustedRlp::new(&block_data);
+
+		let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+		assert!(result.is_err());
+	}
+
+	#[test]
+	fn handles_peer_new_block() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(10, false);
+
+		let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let block = UntrustedRlp::new(&block_data);
+
+		let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn handles_peer_new_block_empty() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(10, false);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let empty_data = vec![];
+		let block = UntrustedRlp::new(&empty_data);
+
+		let result = sync.on_peer_new_block(&mut io, 0, &block);
+
+		assert!(result.is_err());
+	}
+
+	#[test]
+	fn handles_peer_new_hashes() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(10, false);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let hashes_data = get_dummy_hashes();
+		let hashes_rlp = UntrustedRlp::new(&hashes_data);
+
+		let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp);
+
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn handles_peer_new_hashes_empty() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(10, false);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		let empty_hashes_data = vec![];
+		let hashes_rlp = UntrustedRlp::new(&empty_hashes_data);
+
+		let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp);
+
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn hashes_rlp_mutually_acceptable() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, false);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		sync.propagade_new_hashes(&mut io);
+
+		let data = &io.queue[0].data.clone();
+		let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(&data));
+
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn block_rlp_mutually_acceptable() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, false);
+
+		let mut queue = VecDeque::new();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+		let mut io = TestIo::new(&mut client, &mut queue, None);
+
+		sync.propagade_blocks(&mut io);
+
+		let data = &io.queue[0].data.clone();
+		let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(&data));
+
+		assert!(result.is_ok());
+	}
 }

View File

@@ -138,7 +138,7 @@ fn propagade_hashes() {
 }
 
 #[test]
 fn propagade_blocks() {
-	let mut net = TestNet::new(10);
+	let mut net = TestNet::new(2);
 	net.peer_mut(1).chain.add_blocks(10, false);
 	net.sync();

View File

@@ -79,7 +79,7 @@ impl TestBlockChainClient {
 
 impl BlockChainClient for TestBlockChainClient {
 	fn block_total_difficulty(&self, _h: &H256) -> Option<U256> {
-		unimplemented!();
+		Some(U256::zero())
 	}
 
 	fn block_header(&self, h: &H256) -> Option<Bytes> {

View File

@@ -92,7 +92,6 @@ impl JournalDB {
 	/// Commit all recent insert operations and historical removals from the old era
 	/// to the backing database.
-	#[allow(cyclomatic_complexity)]
 	pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
 		// journal format:
 		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
@@ -105,6 +104,17 @@ impl JournalDB {
 		// for each end_era that we journaled that we are now passing by,
 		// we remove all of its removes assuming it is canonical and all
 		// of its inserts otherwise.
+		//
+		// We also keep reference counters for each key inserted in the journal to handle
+		// the following cases where key K must not be deleted from the DB when processing removals:
+		// Given H is the journal size in eras, 0 <= C <= H.
+		// Key K is removed in era A(N) and re-inserted in canonical era B(N + C).
+		// Key K is removed in era A(N) and re-inserted in non-canonical era B'(N + C).
+		// Key K is added in non-canonical era A'(N) and again in canonical era B(N + C).
+		//
+		// The counter is increased each time a key is inserted in the journal in the commit. The list of insertions
+		// is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased
+		// and the key is safe to delete.
 
 		// record new commit's details.
 		let batch = WriteBatch::new();
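Note: a tiny standalone model of how a live counter vetoes the physical delete in the cases above (illustrative only; plain std collections, not the JournalDB types):

	use std::collections::HashMap;

	fn main() {
		let mut counters: HashMap<&'static str, i32> = HashMap::new();
		// A younger era still in the journal has re-inserted "foo":
		*counters.entry("foo").or_insert(0) += 1;
		// The retiring era's canonical record lists "foo" among its removals:
		let to_remove = vec!["foo"];
		// Mirroring the new commit() logic: only purge keys with no live counter.
		let purged = to_remove.iter().filter(|k| !counters.contains_key(**k)).count();
		assert_eq!(purged, 0); // "foo" survives until the re-inserting era retires too
	}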
@@ -125,6 +135,7 @@ impl JournalDB {
 		{
 			let mut r = RlpStream::new_list(3);
 			let inserts: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect();
+			// Increase counter for each inserted key no matter if the block is canonical or not.
 			for i in &inserts {
 				*counters.entry(i.clone()).or_insert(0) += 1;
 			}
@@ -139,6 +150,8 @@ impl JournalDB {
 		if let Some((end_era, canon_id)) = end {
 			let mut index = 0usize;
 			let mut last;
+			let mut to_remove: Vec<H256> = Vec::new();
+			let mut canon_inserts: Vec<H256> = Vec::new();
 			while let Some(rlp_data) = try!(self.backing.get({
 				let mut r = RlpStream::new_list(2);
 				r.append(&end_era);
@@ -146,39 +159,33 @@ impl JournalDB {
 				last = r.drain();
 				&last
 			})) {
-				let to_add;
 				let rlp = Rlp::new(&rlp_data);
-				{
-					to_add = rlp.val_at(1);
-					for i in &to_add {
-						let delete_counter = {
-							if let Some(mut cnt) = counters.get_mut(i) {
-								*cnt -= 1;
-								*cnt == 0
-							}
-							else { false }
-						};
-						if delete_counter {
-							counters.remove(i);
-						}
-					}
-				}
-				let to_remove: Vec<H256> = if canon_id == rlp.val_at(0) {rlp.val_at(2)} else {to_add};
-				for i in &to_remove {
-					if !counters.contains_key(i) {
-						batch.delete(&i).expect("Low-level database error. Some issue with your hard disk?");
-					}
-				}
+				let inserts: Vec<H256> = rlp.val_at(1);
+				JournalDB::decrease_counters(&inserts, &mut counters);
+				// Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
+				if canon_id == rlp.val_at(0) {
+					to_remove.extend(rlp.at(2).iter().map(|r| r.as_val::<H256>()));
+					canon_inserts = inserts;
+				}
+				else {
+					to_remove.extend(inserts);
+				}
 				try!(batch.delete(&last));
-				trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len());
 				index += 1;
 			}
+
+			let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
+			// Purge removed keys if they are not referenced and not re-inserted in the canon commit
+			let mut deletes = 0;
+			for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
+				try!(batch.delete(&h));
+				deletes += 1;
+			}
 			try!(batch.put(&LAST_ERA_KEY, &encode(&end_era)));
+			trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, deletes);
 		}
 
+		// Commit overlay insertions
 		let mut ret = 0u32;
 		let mut deletes = 0usize;
 		for i in self.overlay.drain().into_iter() {
@@ -200,6 +207,23 @@ impl JournalDB {
 		Ok(ret)
 	}
 
+	// Decrease counters for given keys. Deletes obsolete counters.
+	fn decrease_counters(keys: &[H256], counters: &mut HashMap<H256, i32>) {
+		for i in keys.iter() {
+			let delete_counter = {
+				if let Some(mut cnt) = counters.get_mut(i) {
+					*cnt -= 1;
+					*cnt == 0
+				}
+				else { false }
+			};
+			if delete_counter {
+				counters.remove(i);
+			}
+		}
+	}
+
 	fn payload(&self, key: &H256) -> Option<Bytes> {
 		self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
 	}
@@ -387,4 +411,21 @@ mod tests {
 		jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
 		assert!(jdb.exists(&foo));
 	}
+
+	#[test]
+	fn fork_same_key() {
+		// history is 1
+		let mut jdb = JournalDB::new_temp();
+		jdb.commit(0, &b"0".sha3(), None).unwrap();
+
+		let foo = jdb.insert(b"foo");
+		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
+
+		jdb.insert(b"foo");
+		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
+
+		assert!(jdb.exists(&foo));
+
+		jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
+		assert!(jdb.exists(&foo));
+	}
 }