Merge branch 'master' into finduncles

Author: Gav Wood
Date:   2016-03-03 15:16:56 +01:00
Commit: 52dceff3ea

22 changed files with 520 additions and 147 deletions

Cargo.lock (generated)

@@ -227,6 +227,7 @@ dependencies = [
  "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -821,6 +822,14 @@ name = "traitobject"
 version = "0.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"

+[[package]]
+name = "transient-hashmap"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "typeable"
 version = "0.1.2"


@@ -466,7 +466,8 @@ impl BlockChain {
 		let mut write_details = self.block_details.write().unwrap();
 		for (hash, details) in update.block_details.into_iter() {
 			batch.put_extras(&hash, &details);
-			write_details.insert(hash, details);
+			write_details.insert(hash.clone(), details);
+			self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash));
 		}

 		let mut write_receipts = self.block_receipts.write().unwrap();
@@ -802,6 +803,14 @@ impl BlockChain {
 				// TODO: handle block_hashes properly.
 				block_hashes.clear();
+
+				blocks.shrink_to_fit();
+				block_details.shrink_to_fit();
+				block_hashes.shrink_to_fit();
+				transaction_addresses.shrink_to_fit();
+				block_logs.shrink_to_fit();
+				blocks_blooms.shrink_to_fit();
+				block_receipts.shrink_to_fit();
 			}
 			if self.cache_size().total() < self.max_cache_size { break; }
 		}


@@ -123,6 +123,9 @@ pub trait BlockChainClient : Sync + Send {
 	/// Get block total difficulty.
 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;

+	/// Get block hash.
+	fn block_hash(&self, id: BlockId) -> Option<H256>;
+
 	/// Get address code.
 	fn code(&self, address: &Address) -> Option<Bytes>;
@@ -540,6 +543,11 @@ impl BlockChainClient for Client {
 		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}

+	fn block_hash(&self, id: BlockId) -> Option<H256> {
+		let chain = self.chain.read().unwrap();
+		Self::block_hash(&chain, id)
+	}
+
 	fn code(&self, address: &Address) -> Option<Bytes> {
 		self.state().code(address)
 	}


@@ -42,6 +42,22 @@ pub struct Filter {
 	pub topics: [Option<Vec<H256>>; 4],
 }

+impl Clone for Filter {
+	fn clone(&self) -> Self {
+		let mut topics = [None, None, None, None];
+		for i in 0..4 {
+			topics[i] = self.topics[i].clone();
+		}
+
+		Filter {
+			from_block: self.from_block.clone(),
+			to_block: self.to_block.clone(),
+			address: self.address.clone(),
+			topics: topics
+		}
+	}
+}
+
 impl Filter {
 	/// Returns combinations of each address and topic.
 	pub fn bloom_possibilities(&self) -> Vec<H2048> {


@@ -108,6 +108,25 @@ fn can_collect_garbage() {
 	assert!(client.blockchain_cache_info().blocks < 100 * 1024);
 }

+#[test]
+fn can_handle_long_fork() {
+	let client_result = generate_dummy_client(1200);
+	let client = client_result.reference();
+	for _ in 0..10 {
+		client.import_verified_blocks(&IoChannel::disconnected());
+	}
+	assert_eq!(1200, client.chain_info().best_block_number);
+
+	push_blocks_to_client(client, 45, 1201, 800);
+	push_blocks_to_client(client, 49, 1201, 800);
+	push_blocks_to_client(client, 53, 1201, 600);
+
+	for _ in 0..20 {
+		client.import_verified_blocks(&IoChannel::disconnected());
+	}
+	assert_eq!(2000, client.chain_info().best_block_number);
+}
+
 #[test]
 fn can_mine() {
 	let dummy_blocks = get_good_dummy_block_seq(2);


@@ -156,10 +156,9 @@ pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>>
 		rolling_block_number = rolling_block_number + 1;
 		rolling_timestamp = rolling_timestamp + 10;

-		if let Err(_) = client.import_block(create_test_block(&header)) {
-			panic!("error importing block which is valid by definition");
+		if let Err(e) = client.import_block(create_test_block(&header)) {
+			panic!("error importing block which is valid by definition: {:?}", e);
 		}
 	}
 	client.flush_queue();
 	client.import_verified_blocks(&IoChannel::disconnected());
@@ -170,6 +169,34 @@ pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>>
 	}
 }

+pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
+	let test_spec = get_test_spec();
+	let test_engine = test_spec.to_engine().unwrap();
+	let state_root = test_engine.spec().genesis_header().state_root;
+	let mut rolling_hash = client.chain_info().best_block_hash;
+	let mut rolling_block_number = starting_number as u64;
+	let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
+
+	for _ in 0..block_number {
+		let mut header = Header::new();
+		header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap());
+		header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap());
+		header.timestamp = rolling_timestamp;
+		header.number = rolling_block_number;
+		header.parent_hash = rolling_hash;
+		header.state_root = state_root.clone();
+
+		rolling_hash = header.hash();
+		rolling_block_number = rolling_block_number + 1;
+		rolling_timestamp = rolling_timestamp + 10;
+
+		if let Err(e) = client.import_block(create_test_block(&header)) {
+			panic!("error importing block which is valid by definition: {:?}", e);
+		}
+	}
+}
+
 pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<Client>> {
 	let dir = RandomTempPath::new();
 	let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
@@ -253,18 +280,29 @@ pub fn get_temp_state_in(path: &Path) -> State {
 pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
 	let test_spec = get_test_spec();
 	let test_engine = test_spec.to_engine().unwrap();
-	let mut parent = test_engine.spec().genesis_header().hash();
+	get_good_dummy_block_fork_seq(1, count, &test_engine.spec().genesis_header().hash())
+}
+
+pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
+	let test_spec = get_test_spec();
+	let test_engine = test_spec.to_engine().unwrap();
+	let mut rolling_timestamp = start_number as u64 * 10;
+	let mut parent = *parent_hash;
 	let mut r = Vec::new();
-	for i in 1 .. count + 1 {
+	for i in start_number .. start_number + count + 1 {
 		let mut block_header = Header::new();
 		block_header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap());
-		block_header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap());
-		block_header.timestamp = i as u64;
+		block_header.difficulty = U256::from(i).mul(U256([0, 1, 0, 0]));
+		block_header.timestamp = rolling_timestamp;
 		block_header.number = i as u64;
 		block_header.parent_hash = parent;
 		block_header.state_root = test_engine.spec().genesis_header().state_root;
+
 		parent = block_header.hash();
+		rolling_timestamp = rolling_timestamp + 10;
+
 		r.push(create_test_block(&block_header));
 	}
 	r
 }


@@ -20,6 +20,7 @@ ethash = { path = "../ethash" }
 ethsync = { path = "../sync" }
 clippy = { version = "0.0.44", optional = true }
 rustc-serialize = "0.3"
+transient-hashmap = "0.1"
 serde_macros = { version = "0.7.0", optional = true }

 [build-dependencies]


@@ -9,8 +9,8 @@ mod inner {
 	pub fn main() {
 		let out_dir = env::var_os("OUT_DIR").unwrap();
-		let src = Path::new("src/lib.rs.in");
-		let dst = Path::new(&out_dir).join("lib.rs");
+		let src = Path::new("src/v1/types/mod.rs.in");
+		let dst = Path::new(&out_dir).join("mod.rs");
 		let mut registry = syntex::Registry::new();


@@ -27,9 +27,35 @@ extern crate jsonrpc_http_server;
 extern crate ethcore_util as util;
 extern crate ethcore;
 extern crate ethsync;
+extern crate transient_hashmap;

-#[cfg(feature = "serde_macros")]
-include!("lib.rs.in");
-
-#[cfg(not(feature = "serde_macros"))]
-include!(concat!(env!("OUT_DIR"), "/lib.rs"));
+use self::jsonrpc_core::{IoHandler, IoDelegate};
+
+pub mod v1;
+
+/// Http server.
+pub struct HttpServer {
+	handler: IoHandler,
+	threads: usize
+}
+
+impl HttpServer {
+	/// Construct new http server object with given number of threads.
+	pub fn new(threads: usize) -> HttpServer {
+		HttpServer {
+			handler: IoHandler::new(),
+			threads: threads
+		}
+	}
+
+	/// Add io delegate.
+	pub fn add_delegate<D>(&mut self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
+		self.handler.add_delegate(delegate);
+	}
+
+	/// Start server asynchronously in new thread
+	pub fn start_async(self, addr: &str, cors_domain: &str) {
+		let server = jsonrpc_http_server::Server::new(self.handler, self.threads);
+		server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned()))
+	}
+}
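
For orientation, a minimal usage sketch of the HttpServer wrapper above, driven from inside the rpc crate. EthFilterClient and the v1 re-exports are taken from diffs later in this commit; the to_delegate() call is assumed from the v1 traits and is not part of this diff, and start_rpc is a hypothetical helper name.

	use std::sync::Arc;
	use ethcore::client::Client;
	use v1::EthFilterClient; // available via `pub use self::impls::*;` in v1/mod.rs

	fn start_rpc(client: &Arc<Client>) {
		// Four worker threads for the underlying jsonrpc_http_server.
		let mut server = HttpServer::new(4);
		// Register the eth filter endpoints (to_delegate() is assumed from the v1 traits).
		server.add_delegate(EthFilterClient::new(client).to_delegate());
		// Bind address plus the CORS domain forwarded to AccessControlAllowOrigin::Value.
		server.start_async("127.0.0.1:8545", "*");
	}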


@@ -1,30 +0,0 @@
-use self::jsonrpc_core::{IoHandler, IoDelegate};
-
-pub mod v1;
-
-/// Http server.
-pub struct HttpServer {
-	handler: IoHandler,
-	threads: usize
-}
-
-impl HttpServer {
-	/// Construct new http server object with given number of threads.
-	pub fn new(threads: usize) -> HttpServer {
-		HttpServer {
-			handler: IoHandler::new(),
-			threads: threads
-		}
-	}
-
-	/// Add io delegate.
-	pub fn add_delegate<D>(&mut self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
-		self.handler.add_delegate(delegate);
-	}
-
-	/// Start server asynchronously in new thread
-	pub fn start_async(self, addr: &str, cors_domain: &str) {
-		let server = jsonrpc_http_server::Server::new(self.handler, self.threads);
-		server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned()))
-	}
-}

rpc/src/v1/helpers/mod.rs (new file)

@@ -0,0 +1,21 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+mod poll_manager;
+mod poll_filter;
+
+pub use self::poll_manager::PollManager;
+pub use self::poll_filter::PollFilter;


@@ -0,0 +1,10 @@
+//! Helper type with all filter possibilities.
+
+use ethcore::filter::Filter;
+
+#[derive(Clone)]
+pub enum PollFilter {
+	Block,
+	PendingTransaction,
+	Logs(Filter)
+}


@@ -0,0 +1,144 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Indexes all rpc poll requests.
+
+use transient_hashmap::{TransientHashMap, Timer, StandardTimer};
+
+/// Lifetime of poll (in seconds).
+const POLL_LIFETIME: u64 = 60;
+
+pub type PollId = usize;
+pub type BlockNumber = u64;
+
+pub struct PollInfo<F> {
+	pub filter: F,
+	pub block_number: BlockNumber
+}
+
+impl<F> Clone for PollInfo<F> where F: Clone {
+	fn clone(&self) -> Self {
+		PollInfo {
+			filter: self.filter.clone(),
+			block_number: self.block_number.clone()
+		}
+	}
+}
+
+/// Indexes all poll requests.
+///
+/// Lazily garbage collects unused polls info.
+pub struct PollManager<F, T = StandardTimer> where T: Timer {
+	polls: TransientHashMap<PollId, PollInfo<F>, T>,
+	next_available_id: PollId
+}
+
+impl<F> PollManager<F, StandardTimer> {
+	/// Creates new instance of indexer.
+	pub fn new() -> Self {
+		PollManager::new_with_timer(Default::default())
+	}
+}
+
+impl<F, T> PollManager<F, T> where T: Timer {
+	pub fn new_with_timer(timer: T) -> Self {
+		PollManager {
+			polls: TransientHashMap::new_with_timer(POLL_LIFETIME, timer),
+			next_available_id: 0,
+		}
+	}
+
+	/// Returns id which can be used for new poll.
+	///
+	/// Stores information when last poll happend.
+	pub fn create_poll(&mut self, filter: F, block: BlockNumber) -> PollId {
+		self.polls.prune();
+		let id = self.next_available_id;
+		self.next_available_id += 1;
+		self.polls.insert(id, PollInfo {
+			filter: filter,
+			block_number: block,
+		});
+		id
+	}
+
+	/// Updates information when last poll happend.
+	pub fn update_poll(&mut self, id: &PollId, block: BlockNumber) {
+		self.polls.prune();
+		if let Some(info) = self.polls.get_mut(id) {
+			info.block_number = block;
+		}
+	}
+
+	/// Returns number of block when last poll happend.
+	pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo<F>> {
+		self.polls.prune();
+		self.polls.get(id)
+	}
+
+	/// Removes poll info.
+	pub fn remove_poll(&mut self, id: &PollId) {
+		self.polls.remove(id);
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use std::cell::RefCell;
+	use transient_hashmap::Timer;
+	use v1::helpers::PollManager;
+
+	struct TestTimer<'a> {
+		time: &'a RefCell<i64>,
+	}
+
+	impl<'a> Timer for TestTimer<'a> {
+		fn get_time(&self) -> i64 {
+			*self.time.borrow()
+		}
+	}
+
+	#[test]
+	fn test_poll_indexer() {
+		let time = RefCell::new(0);
+		let timer = TestTimer {
+			time: &time,
+		};
+
+		let mut indexer = PollManager::new_with_timer(timer);
+		assert_eq!(indexer.create_poll(false, 20), 0);
+		assert_eq!(indexer.create_poll(true, 20), 1);
+
+		*time.borrow_mut() = 10;
+		indexer.update_poll(&0, 21);
+		assert_eq!(indexer.get_poll_info(&0).unwrap().filter, false);
+		assert_eq!(indexer.get_poll_info(&0).unwrap().block_number, 21);
+
+		*time.borrow_mut() = 30;
+		indexer.update_poll(&1, 23);
+		assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true);
+		assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23);
+
+		*time.borrow_mut() = 75;
+		indexer.update_poll(&0, 30);
+		assert!(indexer.get_poll_info(&0).is_none());
+		assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true);
+		assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23);
+
+		indexer.remove_poll(&1);
+		assert!(indexer.get_poll_info(&1).is_none());
+	}
+}
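
As a quick illustration of the intended call pattern (separate from the unit test above), this is roughly how the eth filter RPCs later in this commit drive PollManager from inside the rpc crate's v1 module; the block numbers are arbitrary and poll_roundtrip is just a name for the sketch.

	use v1::helpers::{PollFilter, PollManager};

	fn poll_roundtrip() {
		let mut polls: PollManager<PollFilter> = PollManager::new();
		// eth_newBlockFilter: remember the chain head seen at registration time.
		let id = polls.create_poll(PollFilter::Block, 100);
		assert_eq!(polls.get_poll_info(&id).unwrap().block_number, 100);
		// eth_getFilterChanges: report blocks since the stored number, then advance it.
		polls.update_poll(&id, 105);
		assert_eq!(polls.get_poll_info(&id).unwrap().block_number, 105);
		// eth_uninstallFilter: drop the poll; expired polls are also pruned lazily on access.
		polls.remove_poll(&id);
		assert!(polls.get_poll_info(&id).is_none());
	}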


@@ -15,11 +15,12 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 //! Eth rpc implementation.
+use std::collections::HashMap;
+use std::sync::{Arc, Weak, Mutex, RwLock};
 use ethsync::{EthSync, SyncState};
 use jsonrpc_core::*;
 use util::numbers::*;
 use util::sha3::*;
-use util::standard::{RwLock, HashMap, Arc, Weak};
 use util::rlp::encode;
 use ethcore::client::*;
 use ethcore::block::{IsBlock};
@@ -29,6 +30,7 @@ use ethcore::ethereum::Ethash;
 use ethcore::ethereum::denominations::shannon;
 use v1::traits::{Eth, EthFilter};
 use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log};
+use v1::helpers::{PollFilter, PollManager};

 /// Eth rpc implementation.
 pub struct EthClient {
@@ -255,28 +257,98 @@ impl Eth for EthClient {

 /// Eth filter rpc implementation.
 pub struct EthFilterClient {
-	client: Weak<Client>
+	client: Weak<Client>,
+	polls: Mutex<PollManager<PollFilter>>,
 }

 impl EthFilterClient {
 	/// Creates new Eth filter client.
 	pub fn new(client: &Arc<Client>) -> Self {
 		EthFilterClient {
-			client: Arc::downgrade(client)
+			client: Arc::downgrade(client),
+			polls: Mutex::new(PollManager::new())
 		}
 	}
 }

 impl EthFilter for EthFilterClient {
-	fn new_block_filter(&self, _params: Params) -> Result<Value, Error> {
-		Ok(Value::U64(0))
+	fn new_filter(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(Filter,)>(params)
+			.and_then(|(filter,)| {
+				let mut polls = self.polls.lock().unwrap();
+				let id = polls.create_poll(PollFilter::Logs(filter.into()), take_weak!(self.client).chain_info().best_block_number);
+				to_value(&U256::from(id))
+			})
 	}

-	fn new_pending_transaction_filter(&self, _params: Params) -> Result<Value, Error> {
-		Ok(Value::U64(1))
+	fn new_block_filter(&self, params: Params) -> Result<Value, Error> {
+		match params {
+			Params::None => {
+				let mut polls = self.polls.lock().unwrap();
+				let id = polls.create_poll(PollFilter::Block, take_weak!(self.client).chain_info().best_block_number);
+				to_value(&U256::from(id))
+			},
+			_ => Err(Error::invalid_params())
+		}
 	}

-	fn filter_changes(&self, _: Params) -> Result<Value, Error> {
-		to_value(&take_weak!(self.client).chain_info().best_block_hash).map(|v| Value::Array(vec![v]))
+	fn new_pending_transaction_filter(&self, params: Params) -> Result<Value, Error> {
+		match params {
+			Params::None => {
+				let mut polls = self.polls.lock().unwrap();
+				let id = polls.create_poll(PollFilter::PendingTransaction, take_weak!(self.client).chain_info().best_block_number);
+				to_value(&U256::from(id))
+			},
+			_ => Err(Error::invalid_params())
+		}
+	}
+
+	fn filter_changes(&self, params: Params) -> Result<Value, Error> {
+		let client = take_weak!(self.client);
+		from_params::<(Index,)>(params)
+			.and_then(|(index,)| {
+				let info = self.polls.lock().unwrap().get_poll_info(&index.value()).cloned();
+				match info {
+					None => Ok(Value::Array(vec![] as Vec<Value>)),
+					Some(info) => match info.filter {
+						PollFilter::Block => {
+							let current_number = client.chain_info().best_block_number;
+							let hashes = (info.block_number..current_number).into_iter()
+								.map(BlockId::Number)
+								.filter_map(|id| client.block_hash(id))
+								.collect::<Vec<H256>>();
+
+							self.polls.lock().unwrap().update_poll(&index.value(), current_number);
+
+							to_value(&hashes)
+						},
+						PollFilter::PendingTransaction => {
+							// TODO: fix implementation once TransactionQueue is merged
+							to_value(&vec![] as &Vec<H256>)
+						},
+						PollFilter::Logs(mut filter) => {
+							filter.from_block = BlockId::Number(info.block_number);
+							filter.to_block = BlockId::Latest;
+							let logs = client.logs(filter)
+								.into_iter()
+								.map(From::from)
+								.collect::<Vec<Log>>();
+
+							let current_number = client.chain_info().best_block_number;
+							self.polls.lock().unwrap().update_poll(&index.value(), current_number);
+
+							to_value(&logs)
+						}
+					}
+				}
+			})
+	}
+
+	fn uninstall_filter(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(Index,)>(params)
+			.and_then(|(index,)| {
+				self.polls.lock().unwrap().remove_poll(&index.value());
+				to_value(&true)
+			})
 	}
 }


@@ -30,9 +30,7 @@ impl Web3Client {
 impl Web3 for Web3Client {
 	fn client_version(&self, params: Params) -> Result<Value, Error> {
 		match params {
-			Params::None => {
-				Ok(Value::String(version().to_owned().replace("Parity/", "Parity//")))
-			}
+			Params::None => Ok(Value::String(version().to_owned().replace("Parity/", "Parity//"))),
 			_ => Err(Error::invalid_params())
 		}
 	}


@@ -23,6 +23,7 @@ mod impls;
 mod types;
 #[cfg(test)]
 mod tests;
+mod helpers;

 pub use self::traits::{Web3, Eth, EthFilter, Net};
 pub use self::impls::*;


@@ -14,22 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-mod block;
-mod block_number;
-mod bytes;
-mod filter;
-mod index;
-mod log;
-mod optionals;
-mod sync;
-mod transaction;
+#[cfg(feature = "serde_macros")]
+include!("mod.rs.in");

-pub use self::block::{Block, BlockTransactions};
-pub use self::block_number::BlockNumber;
-pub use self::bytes::Bytes;
-pub use self::filter::Filter;
-pub use self::index::Index;
-pub use self::log::Log;
-pub use self::optionals::OptionalValue;
-pub use self::sync::{SyncStatus, SyncInfo};
-pub use self::transaction::Transaction;
+#[cfg(not(feature = "serde_macros"))]
+include!(concat!(env!("OUT_DIR"), "/mod.rs"));


@@ -0,0 +1,35 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+mod block;
+mod block_number;
+mod bytes;
+mod filter;
+mod index;
+mod log;
+mod optionals;
+mod sync;
+mod transaction;
+
+pub use self::block::{Block, BlockTransactions};
+pub use self::block_number::BlockNumber;
+pub use self::bytes::Bytes;
+pub use self::filter::Filter;
+pub use self::index::Index;
+pub use self::log::Log;
+pub use self::optionals::OptionalValue;
+pub use self::sync::{SyncStatus, SyncInfo};
+pub use self::transaction::Transaction;


@@ -635,16 +635,7 @@ impl ChainSync {
 		match self.last_imported_block { None => 0, Some(x) => x }
 	}

-	/// Find some headers or blocks to download for a peer.
-	fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId, ignore_others: bool) {
-		self.clear_peer_download(peer_id);
-		if io.chain().queue_info().is_full() {
-			self.pause_sync();
-			return;
-		}
-
-		// check to see if we need to download any block bodies first
+	fn find_block_bodies_hashes_to_request(&self, ignore_others: bool) -> (Vec<H256>, Vec<BlockNumber>) {
 		let mut needed_bodies: Vec<H256> = Vec::new();
 		let mut needed_numbers: Vec<BlockNumber> = Vec::new();
@@ -664,18 +655,33 @@ impl ChainSync {
 				}
 			}
 		}
+		(needed_bodies, needed_numbers)
+	}
+
+	/// Find some headers or blocks to download for a peer.
+	fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId, ignore_others: bool) {
+		self.clear_peer_download(peer_id);
+		if io.chain().queue_info().is_full() {
+			self.pause_sync();
+			return;
+		}
+
+		// check to see if we need to download any block bodies first
+		let (needed_bodies, needed_numbers) = self.find_block_bodies_hashes_to_request(ignore_others);
+
 		if !needed_bodies.is_empty() {
 			let (head, _) = self.headers.range_iter().next().unwrap();
 			if needed_numbers.first().unwrap() - head > self.max_download_ahead_blocks as BlockNumber {
 				trace!(target: "sync", "{}: Stalled download ({} vs {}), helping with downloading block bodies", peer_id, needed_numbers.first().unwrap(), head);
 				self.request_blocks(io, peer_id, true);
-				return;
-			}
-			self.downloading_bodies.extend(needed_numbers.iter());
-			replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_numbers);
-			self.request_bodies(io, peer_id, needed_bodies);
-		}
-		else {
+			} else {
+				self.downloading_bodies.extend(needed_numbers.iter());
+				replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_numbers);
+				self.request_bodies(io, peer_id, needed_bodies);
+			}
+			return;
+		}
+
 		// check if need to download headers
 		let mut start = 0;
 		if !self.have_common_block {
@@ -733,7 +739,6 @@ impl ChainSync {
 				self.request_headers_by_number(io, peer_id, start, 1, 0, false);
 			}
 		}
-	}

 	/// Clear all blocks/headers marked as being downloaded by a peer.
 	fn clear_peer_download(&mut self, peer_id: PeerId) {


@@ -105,6 +105,10 @@ impl BlockChainClient for TestBlockChainClient {
 		Some(U256::zero())
 	}

+	fn block_hash(&self, id: BlockId) -> Option<H256> {
+		unimplemented!();
+	}
+
 	fn code(&self, _address: &Address) -> Option<Bytes> {
 		unimplemented!();
 	}


@@ -2003,6 +2003,7 @@ mod tests {

 	#[test]
+	#[cfg_attr(feature = "dev", allow(cyclomatic_complexity))]
 	fn u256_multi_full_mul() {
 		let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
 		assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);


@@ -111,7 +111,7 @@ impl<Row, Col, Val> Table<Row, Col, Val>
 	///
 	/// Returns previous value (if any)
 	pub fn insert(&mut self, row: Row, col: Col, val: Val) -> Option<Val> {
-		self.map.entry(row).or_insert_with(|| HashMap::new()).insert(col, val)
+		self.map.entry(row).or_insert_with(HashMap::new).insert(col, val)
 	}
 }