Merge branch 'master' of github.com:ethcore/parity into sync

commit b997a61d4a
@@ -3,7 +3,7 @@
 	"engineName": "Ethash",
 	"params": {
 		"accountStartNonce": "0x00",
-		"frontierCompatibilityModeLimit": "0xdbba0",
+		"frontierCompatibilityModeLimit": "0xf4240",
 		"maximumExtraDataSize": "0x20",
 		"tieBreakingGas": false,
 		"minGasLimit": "0x1388",
@@ -3,7 +3,7 @@
 	"engineName": "Ethash",
 	"params": {
 		"accountStartNonce": "0x00",
-		"frontierCompatibilityModeLimit": "0x0dbba0",
+		"frontierCompatibilityModeLimit": "0xf4240",
 		"maximumExtraDataSize": "0x20",
 		"tieBreakingGas": false,
 		"minGasLimit": "0x1388",
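Both spec hunks above move the frontierCompatibilityModeLimit switch-over from 0xdbba0 to 0xf4240, i.e. from block 900,000 to block 1,000,000 in decimal. A minimal check of that conversion, using nothing beyond the standard library:

    fn main() {
        // Hex block numbers from the spec hunks above, decoded to decimal.
        assert_eq!(0xdbba0, 900_000);
        assert_eq!(0xf4240, 1_000_000);
    }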
@@ -10,7 +10,7 @@ use crossbeam;
 /// Max depth to avoid stack overflow (when it's reached we start a new thread with VM)
 /// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132)
 /// Maybe something like here: https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp
-const MAX_VM_DEPTH_FOR_THREAD: usize = 128;
+const MAX_VM_DEPTH_FOR_THREAD: usize = 64;
 
 /// Returns new address created from address and given nonce.
 pub fn contract_address(address: &Address, nonce: &U256) -> Address {
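Halving MAX_VM_DEPTH_FOR_THREAD makes the VM hand off to a fresh thread (and a fresh stack) twice as often. A minimal sketch of the depth-guard pattern, assuming plain std::thread rather than the crossbeam scope the crate imports, with a hypothetical recurse standing in for nested VM calls:

    use std::thread;

    const MAX_VM_DEPTH_FOR_THREAD: usize = 64;

    // Hypothetical deep recursion standing in for nested VM calls.
    fn recurse(remaining: usize, depth: usize) -> usize {
        if remaining == 0 {
            depth
        } else if depth < MAX_VM_DEPTH_FOR_THREAD {
            recurse(remaining - 1, depth + 1)
        } else {
            // Depth budget spent: continue on a new thread with a fresh
            // stack and a reset per-thread depth counter.
            thread::spawn(move || recurse(remaining - 1, 0)).join().unwrap()
        }
    }

    fn main() {
        // 1000 nested calls never use more than 64 frames of any one stack.
        assert!(recurse(1000, 0) <= MAX_VM_DEPTH_FOR_THREAD);
    }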
@@ -1,6 +1,3 @@
-use std::env;
-use log::{LogLevelFilter};
-use env_logger::LogBuilder;
 use super::test_common::*;
 use client::{BlockChainClient,Client};
 use pod_state::*;
@@ -13,24 +10,8 @@ pub enum ChainEra {
 	Homestead,
 }
 
-lazy_static! {
-	static ref LOG_DUMMY: bool = {
-		let mut builder = LogBuilder::new();
-		builder.filter(None, LogLevelFilter::Info);
-
-		if let Ok(log) = env::var("RUST_LOG") {
-			builder.parse(&log);
-		}
-
-		if let Ok(_) = builder.init() {
-			println!("logger initialized");
-		}
-		true
-	};
-}
-
 pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
-	let _ = LOG_DUMMY.deref();
+	init_log();
 	let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid");
 	let mut failed = Vec::new();
 
@@ -86,6 +86,20 @@ fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes {
 	create_test_block(&create_unverifiable_block_header(order, parent_hash))
 }
 
+pub fn create_test_block_with_data(header: &Header, transactions: &[&Transaction], uncles: &[Header]) -> Bytes {
+	let mut rlp = RlpStream::new_list(3);
+	rlp.append(header);
+	rlp.append_list(transactions.len());
+	for t in transactions {
+		rlp.append_raw(&t.rlp_bytes_opt(Seal::With), 1);
+	}
+	rlp.append_list(uncles.len());
+	for h in uncles {
+		rlp.append(h);
+	}
+	rlp.out()
+}
+
 pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>> {
 	let dir = RandomTempPath::new();
 
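A hypothetical call to the relocated helper from a test, assuming a default-constructible Header; the three-item RLP list is header, transaction list, uncle list:

    let header = Header::default(); // assumption: Header implements Default
    // No transactions and no uncles, so both inner lists are empty.
    let bytes = create_test_block_with_data(&header, &[], &[]);
    assert!(!bytes.is_empty());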
@@ -13,7 +13,7 @@ fn do_json_test(json_data: &[u8]) -> Vec<String> {
 		let schedule = match test.find("blocknumber")
 			.and_then(|j| j.as_string())
 			.and_then(|s| BlockNumber::from_str(s).ok())
-			.unwrap_or(0) { x if x < 900000 => &old_schedule, _ => &new_schedule };
+			.unwrap_or(0) { x if x < 1_000_000 => &old_schedule, _ => &new_schedule };
 		let rlp = Bytes::from_json(&test["rlp"]);
 		let res = UntrustedRlp::new(&rlp).as_val().map_err(From::from).and_then(|t: Transaction| t.validate(schedule, schedule.have_delegate_call));
 		fail_unless(test.find("transaction").is_none() == res.is_err());
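With the guard raised to 1_000_000, a test tagged with block 999,999 still validates against the old (Frontier) schedule and one tagged 1,000,000 against the new one. The same selection logic in a standalone sketch (the string labels are stand-ins, not the crate's types):

    fn schedule_name(block: u64) -> &'static str {
        // Mirrors the match guard in the hunk above.
        match block { x if x < 1_000_000 => "frontier", _ => "homestead" }
    }

    fn main() {
        assert_eq!(schedule_name(999_999), "frontier");
        assert_eq!(schedule_name(1_000_000), "homestead");
    }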
@@ -221,28 +221,7 @@ mod tests {
 	use spec::*;
 	use transaction::*;
 	use basic_types::*;
+	use tests::helpers::*;
 
-	fn create_test_block(header: &Header) -> Bytes {
-		let mut rlp = RlpStream::new_list(3);
-		rlp.append(header);
-		rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
-		rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
-		rlp.out()
-	}
-
-	fn create_test_block_with_data(header: &Header, transactions: &[&Transaction], uncles: &[Header]) -> Bytes {
-		let mut rlp = RlpStream::new_list(3);
-		rlp.append(header);
-		rlp.append_list(transactions.len());
-		for t in transactions {
-			rlp.append_raw(&t.rlp_bytes_opt(Seal::With), 1);
-		}
-		rlp.append_list(uncles.len());
-		for h in uncles {
-			rlp.append(h);
-		}
-		rlp.out()
-	}
-
 	fn check_ok(result: Result<(), Error>) {
 		result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e));
@@ -21,6 +21,7 @@ use client::{BlockChainClient, BlockStatus};
 use sync::range_collection::{RangeCollection, ToUsize, FromUsize};
 use error::*;
 use sync::io::SyncIo;
+use std::option::Option;
 
 impl ToUsize for BlockNumber {
 	fn to_usize(&self) -> usize {
@@ -99,14 +100,14 @@ pub struct SyncStatus {
 	pub protocol_version: u8,
 	/// BlockChain height for the moment the sync started.
 	pub start_block_number: BlockNumber,
-	/// Last fully downloaded and imported block number.
-	pub last_imported_block_number: BlockNumber,
-	/// Highest block number in the download queue.
-	pub highest_block_number: BlockNumber,
+	/// Last fully downloaded and imported block number (if any).
+	pub last_imported_block_number: Option<BlockNumber>,
+	/// Highest block number in the download queue (if any).
+	pub highest_block_number: Option<BlockNumber>,
 	/// Total number of blocks for the sync process.
-	pub blocks_total: usize,
+	pub blocks_total: BlockNumber,
 	/// Number of blocks downloaded so far.
-	pub blocks_received: usize,
+	pub blocks_received: BlockNumber,
 	/// Total number of connected peers
 	pub num_peers: usize,
 	/// Total number of active peers
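With the two block-number fields now Option<BlockNumber>, a consumer must decide what "no block yet" means for its display. A sketch of one way to read the new status, over a simplified struct with the same field types (not the crate's full SyncStatus):

    type BlockNumber = u64;

    struct SyncStatus {
        last_imported_block_number: Option<BlockNumber>,
        highest_block_number: Option<BlockNumber>,
    }

    fn describe(s: &SyncStatus) -> String {
        match (s.last_imported_block_number, s.highest_block_number) {
            (Some(last), Some(high)) => format!("imported #{} of #{}", last, high),
            _ => "waiting for first block".to_string(),
        }
    }

    fn main() {
        let s = SyncStatus { last_imported_block_number: None, highest_block_number: None };
        assert_eq!(describe(&s), "waiting for first block");
    }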
@@ -147,7 +148,7 @@ pub struct ChainSync {
 	/// Last block number for the start of sync
 	starting_block: BlockNumber,
 	/// Highest block number seen
-	highest_block: BlockNumber,
+	highest_block: Option<BlockNumber>,
 	/// Set of block header numbers being downloaded
 	downloading_headers: HashSet<BlockNumber>,
 	/// Set of block body numbers being downloaded
@@ -161,9 +162,9 @@ pub struct ChainSync {
 	/// Used to map body to header
 	header_ids: HashMap<HeaderId, BlockNumber>,
 	/// Last imported block number
-	last_imported_block: BlockNumber,
+	last_imported_block: Option<BlockNumber>,
 	/// Last imported block hash
-	last_imported_hash: H256,
+	last_imported_hash: Option<H256>,
 	/// Syncing total difficulty
 	syncing_difficulty: U256,
 	/// True if common block for our and remote chain has been found
@@ -177,15 +178,15 @@ impl ChainSync {
 		ChainSync {
 			state: SyncState::NotSynced,
 			starting_block: 0,
-			highest_block: 0,
+			highest_block: None,
 			downloading_headers: HashSet::new(),
 			downloading_bodies: HashSet::new(),
 			headers: Vec::new(),
 			bodies: Vec::new(),
 			peers: HashMap::new(),
 			header_ids: HashMap::new(),
-			last_imported_block: 0,
-			last_imported_hash: H256::new(),
+			last_imported_block: None,
+			last_imported_hash: None,
 			syncing_difficulty: U256::from(0u64),
 			have_common_block: false,
 		}
@@ -199,8 +200,8 @@ impl ChainSync {
 			start_block_number: self.starting_block,
 			last_imported_block_number: self.last_imported_block,
 			highest_block_number: self.highest_block,
-			blocks_received: (self.last_imported_block - self.starting_block) as usize,
-			blocks_total: (self.highest_block - self.starting_block) as usize,
+			blocks_received: match self.last_imported_block { None => 0, Some(x) => x - self.starting_block },
+			blocks_total: match self.highest_block { None => 0, Some(x) => x - self.starting_block },
 			num_peers: self.peers.len(),
 			num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(),
 		}
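Each match above clamps the counter to zero before anything has been imported; it is equivalent to map_or on the Option. A minimal equivalence check:

    fn main() {
        let starting_block: u64 = 5;
        let last_imported_block: Option<u64> = Some(12);

        let via_match = match last_imported_block { None => 0, Some(x) => x - starting_block };
        let via_map_or = last_imported_block.map_or(0, |x| x - starting_block);
        assert_eq!(via_match, via_map_or); // both 7
    }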
@@ -229,10 +230,10 @@ impl ChainSync {
 	/// Restart sync
 	pub fn restart(&mut self, io: &mut SyncIo) {
 		self.reset();
-		self.last_imported_block = 0;
-		self.last_imported_hash = H256::new();
+		self.last_imported_block = None;
+		self.last_imported_hash = None;
 		self.starting_block = 0;
-		self.highest_block = 0;
+		self.highest_block = None;
 		self.have_common_block = false;
 		io.chain().clear_queue();
 		self.starting_block = io.chain().chain_info().best_block_number;
@@ -293,25 +294,27 @@ impl ChainSync {
 		for i in 0..item_count {
 			let info: BlockHeader = try!(r.val_at(i));
 			let number = BlockNumber::from(info.number);
-			if number <= self.last_imported_block || self.headers.have_item(&number) {
+			if number <= self.current_base_block() || self.headers.have_item(&number) {
 				trace!(target: "sync", "Skipping existing block header");
 				continue;
 			}
-			if number > self.highest_block {
-				self.highest_block = number;
+			if self.highest_block == None || number > self.highest_block.unwrap() {
+				self.highest_block = Some(number);
 			}
 			let hash = info.hash();
 			match io.chain().block_status(&hash) {
 				BlockStatus::InChain => {
 					self.have_common_block = true;
-					self.last_imported_block = number;
-					self.last_imported_hash = hash.clone();
+					self.last_imported_block = Some(number);
+					self.last_imported_hash = Some(hash.clone());
 					trace!(target: "sync", "Found common header {} ({})", number, hash);
 				},
 				_ => {
 					if self.have_common_block {
 						//validate chain
-						if self.have_common_block && number == self.last_imported_block + 1 && info.parent_hash != self.last_imported_hash {
+						let base_hash = self.last_imported_hash.clone().unwrap();
+						if self.have_common_block && number == self.current_base_block() + 1 && info.parent_hash != base_hash {
 							// TODO: lower peer rating
 							debug!(target: "sync", "Mismatched block header {} {}", number, hash);
 							continue;
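The rewritten highest-block test spells out both cases with unwrap; the same condition can be written without unwrap via map_or (a sketch of the equivalent form, not what the patch does):

    fn main() {
        let mut highest_block: Option<u64> = None;
        for number in vec![3u64, 1, 7] {
            // Same as: highest_block == None || number > highest_block.unwrap()
            if highest_block.map_or(true, |h| number > h) {
                highest_block = Some(number);
            }
        }
        assert_eq!(highest_block, Some(7));
    }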
@@ -407,7 +410,7 @@ impl ChainSync {
 		trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
 		let header_view = HeaderView::new(header_rlp.as_raw());
 		// TODO: Decompose block and add to self.headers and self.bodies instead
-		if header_view.number() == From::from(self.last_imported_block + 1) {
+		if header_view.number() == From::from(self.current_base_block() + 1) {
 			match io.chain().import_block(block_rlp.as_raw().to_vec()) {
 				Err(ImportError::AlreadyInChain) => {
 					trace!(target: "sync", "New block already in chain {:?}", h);
@@ -550,6 +553,10 @@ impl ChainSync {
 		}
 	}
 
+	fn current_base_block(&self) -> BlockNumber {
+		match self.last_imported_block { None => 0, Some(x) => x }
+	}
+
 	/// Find some headers or blocks to download for a peer.
 	fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) {
 		self.clear_peer_download(peer_id);
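current_base_block() folds the Option back to a plain number, treating "nothing imported yet" as block 0 so the surrounding `+ 1` arithmetic keeps working; it is unwrap_or(0) written out as a match. A tiny equivalence check:

    fn main() {
        let last_imported_block: Option<u64> = None;
        let base = match last_imported_block { None => 0, Some(x) => x };
        assert_eq!(base, last_imported_block.unwrap_or(0));
    }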
@@ -563,7 +570,7 @@ impl ChainSync {
 		let mut needed_bodies: Vec<H256> = Vec::new();
 		let mut needed_numbers: Vec<BlockNumber> = Vec::new();
 
-		if self.have_common_block && !self.headers.is_empty() && self.headers.range_iter().next().unwrap().0 == self.last_imported_block + 1 {
+		if self.have_common_block && !self.headers.is_empty() && self.headers.range_iter().next().unwrap().0 == self.current_base_block() + 1 {
 			for (start, ref items) in self.headers.range_iter() {
 				if needed_bodies.len() > MAX_BODIES_TO_REQUEST {
 					break;
@@ -596,12 +603,12 @@ impl ChainSync {
 				}
 				if start == 0 {
 					self.have_common_block = true; //reached genesis
-					self.last_imported_hash = chain_info.genesis_hash;
+					self.last_imported_hash = Some(chain_info.genesis_hash);
 				}
 			}
 			if self.have_common_block {
 				let mut headers: Vec<BlockNumber> = Vec::new();
-				let mut prev = self.last_imported_block + 1;
+				let mut prev = self.current_base_block() + 1;
 				for (next, ref items) in self.headers.range_iter() {
 					if !headers.is_empty() {
 						break;
@@ -656,7 +663,7 @@ impl ChainSync {
 		{
 			let headers = self.headers.range_iter().next().unwrap();
 			let bodies = self.bodies.range_iter().next().unwrap();
-			if headers.0 != bodies.0 || headers.0 != self.last_imported_block + 1 {
+			if headers.0 != bodies.0 || headers.0 != self.current_base_block() + 1 {
 				return;
 			}
 
@@ -672,18 +679,18 @@ impl ChainSync {
 			match io.chain().import_block(block_rlp.out()) {
 				Err(ImportError::AlreadyInChain) => {
 					trace!(target: "sync", "Block already in chain {:?}", h);
-					self.last_imported_block = headers.0 + i as BlockNumber;
-					self.last_imported_hash = h.clone();
+					self.last_imported_block = Some(headers.0 + i as BlockNumber);
+					self.last_imported_hash = Some(h.clone());
 				},
 				Err(ImportError::AlreadyQueued) => {
 					trace!(target: "sync", "Block already queued {:?}", h);
-					self.last_imported_block = headers.0 + i as BlockNumber;
-					self.last_imported_hash = h.clone();
+					self.last_imported_block = Some(headers.0 + i as BlockNumber);
+					self.last_imported_hash = Some(h.clone());
 				},
 				Ok(_) => {
 					trace!(target: "sync", "Block queued {:?}", h);
-					self.last_imported_block = headers.0 + i as BlockNumber;
-					self.last_imported_hash = h.clone();
+					self.last_imported_block = Some(headers.0 + i as BlockNumber);
+					self.last_imported_hash = Some(h.clone());
 					imported += 1;
 				},
 				Err(e) => {
@@ -700,8 +707,8 @@ impl ChainSync {
 			return;
 		}
 
-		self.headers.remove_head(&(self.last_imported_block + 1));
-		self.bodies.remove_head(&(self.last_imported_block + 1));
+		self.headers.remove_head(&(self.last_imported_block.unwrap() + 1));
+		self.bodies.remove_head(&(self.last_imported_block.unwrap() + 1));
 
 		if self.headers.is_empty() {
 			assert!(self.bodies.is_empty());
@@ -4,7 +4,7 @@ use block_queue::BlockQueueInfo;
 use header::{Header as BlockHeader, BlockNumber};
 use error::*;
 use sync::io::SyncIo;
-use sync::chain::ChainSync;
+use sync::chain::{ChainSync, SyncState};
 
 struct TestBlockChainClient {
 	blocks: RwLock<HashMap<H256, Bytes>>,
@@ -241,13 +241,15 @@ struct TestPeer {
 }
 
 struct TestNet {
-	peers: Vec<TestPeer>
+	peers: Vec<TestPeer>,
+	started: bool
 }
 
 impl TestNet {
 	pub fn new(n: usize) -> TestNet {
 		let mut net = TestNet {
 			peers: Vec::new(),
+			started: false
 		};
 		for _ in 0..n {
 			net.peers.push(TestPeer {
@@ -291,10 +293,28 @@ impl TestNet {
 		}
 	}
 
-	pub fn sync(&mut self) {
+	pub fn restart_peer(&mut self, i: usize) {
+		let peer = self.peer_mut(i);
+		peer.sync.restart(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
+	}
+
+	pub fn sync(&mut self) -> u32 {
 		self.start();
+		let mut total_steps = 0;
 		while !self.done() {
-			self.sync_step()
+			self.sync_step();
+			total_steps = total_steps + 1;
+		}
+		total_steps
+	}
+
+	pub fn sync_steps(&mut self, count: usize) {
+		if !self.started {
+			self.start();
+			self.started = true;
+		}
+		for _ in 0..count {
+			self.sync_step();
 		}
 	}
 
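sync() now reports how many rounds it took to drain the network, which chain_takes_few_steps asserts on below, while sync_steps() lets a test stop mid-sync, as chain_restart does. A hypothetical use inside this test module:

    let mut net = TestNet::new(3);
    net.peer_mut(1).chain.add_blocks(100, false);
    let steps = net.sync();  // run to completion, counting rounds
    assert!(steps > 0);

    let mut net2 = TestNet::new(3);
    net2.peer_mut(1).chain.add_blocks(1000, false);
    net2.sync_steps(8);      // deliberately stop part-way through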
@@ -303,9 +323,8 @@ impl TestNet {
 	}
 }
 
-
 #[test]
-fn full_sync_two_peers() {
+fn chain_two_peers() {
 	::env_logger::init().ok();
 	let mut net = TestNet::new(3);
 	net.peer_mut(1).chain.add_blocks(1000, false);
@@ -316,7 +335,27 @@ fn full_sync_two_peers() {
 }
 
 #[test]
-fn full_sync_empty_blocks() {
+fn chain_status_after_sync() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(1000, false);
+	net.peer_mut(2).chain.add_blocks(1000, false);
+	net.sync();
+	let status = net.peer(0).sync.status();
+	assert_eq!(status.state, SyncState::Idle);
+}
+
+#[test]
+fn chain_takes_few_steps() {
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(100, false);
+	net.peer_mut(2).chain.add_blocks(100, false);
+	let total_steps = net.sync();
+	assert!(total_steps < 7);
+}
+
+#[test]
+fn chain_empty_blocks() {
 	::env_logger::init().ok();
 	let mut net = TestNet::new(3);
 	for n in 0..200 {
@@ -329,7 +368,7 @@ fn full_sync_empty_blocks() {
 }
 
 #[test]
-fn forked_sync() {
+fn chain_forged() {
 	::env_logger::init().ok();
 	let mut net = TestNet::new(3);
 	net.peer_mut(0).chain.add_blocks(300, false);
@@ -347,3 +386,25 @@ fn forked_sync() {
 	assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain);
 	assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain);
 }
+
+#[test]
+fn chain_restart() {
+	let mut net = TestNet::new(3);
+	net.peer_mut(1).chain.add_blocks(1000, false);
+	net.peer_mut(2).chain.add_blocks(1000, false);
+
+	net.sync_steps(8);
+
+	// make sure that sync has actually happened
+	assert!(net.peer(0).chain.chain_info().best_block_number > 100);
+	net.restart_peer(0);
+
+	let status = net.peer(0).sync.status();
+	assert_eq!(status.state, SyncState::NotSynced);
+}
+
+#[test]
+fn chain_status_empty() {
+	let net = TestNet::new(2);
+	assert_eq!(net.peer(0).sync.status().state, SyncState::NotSynced);
+}
@@ -43,8 +43,6 @@ extern crate tiny_keccak;
 #[macro_use]
 extern crate heapsize;
 #[macro_use]
-extern crate log;
-#[macro_use]
 extern crate lazy_static;
 #[macro_use]
 extern crate itertools;
@@ -56,6 +54,8 @@ extern crate arrayvec;
 extern crate elastic_array;
 extern crate crossbeam;
 extern crate serde;
+#[macro_use]
+extern crate log as rlog;
 
 /// TODO [Gav Wood] Please document me
 pub mod standard;
@@ -98,6 +98,7 @@ pub mod semantic_version;
 pub mod io;
 /// TODO [Gav Wood] Please document me
 pub mod network;
+pub mod log;
 
 pub use common::*;
 pub use misc::*;
@@ -118,3 +119,4 @@ pub use squeeze::*;
 pub use semantic_version::*;
 pub use network::*;
 pub use io::*;
+pub use log::*;
util/src/log.rs (new file, 26 lines)
@@ -0,0 +1,26 @@
+//! Common log helper functions
+
+use std::env;
+use rlog::{LogLevelFilter};
+use env_logger::LogBuilder;
+
+lazy_static! {
+	static ref LOG_DUMMY: bool = {
+		let mut builder = LogBuilder::new();
+		builder.filter(None, LogLevelFilter::Info);
+
+		if let Ok(log) = env::var("RUST_LOG") {
+			builder.parse(&log);
+		}
+
+		if let Ok(_) = builder.init() {
+			println!("logger initialized");
+		}
+		true
+	};
+}
+
+/// Initialize log with default settings
+pub fn init_log() {
+	let _ = *LOG_DUMMY;
+}
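init_log() relies on lazy_static for idempotence: the first dereference of LOG_DUMMY runs the builder, and every later call is a no-op, so tests can call it unconditionally. A hypothetical test exercising that, assuming util is linked as a crate:

    #[test]
    fn init_log_is_idempotent() {
        util::log::init_log(); // first call initializes env_logger
        util::log::init_log(); // later calls do nothing
    }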