stable backports for 1.11.10 (#9228)

* parity-version: bump stable to 1.11.9

* Fix compilation error on nightly rust (#8707)

On nightly Rust passing `public_url` works, but that breaks on stable. This works for both.

* parity-version: bump stable to 1.11.10

* Check if synced when using eth_getWork (#9193) (#9210) (sketched below)

* Check if synced when using eth_getWork (#9193)

* Don't use fn syncing

* Fix indentation

* Fix typo

* Don't check for warping

* rpc: avoid calling queue_info twice on eth_getWork
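
A minimal sketch of the idea behind this group of commits, using hypothetical simplified types (the real check lives in the `eth_getWork` RPC handler, which is not part of the hunks below): refuse to hand out mining work while the node is still importing blocks, and read `queue_info` once instead of twice.

```rust
// Hypothetical, simplified stand-ins for the client's sync state.
struct QueueInfo {
    unverified_queue_size: usize,
    verified_queue_size: usize,
}

struct Client {
    major_syncing: bool,
    queue: QueueInfo,
}

fn get_work(client: &Client) -> Result<String, String> {
    // read the queue info once and reuse it
    // (cf. "avoid calling queue_info twice on eth_getWork")
    let queue = &client.queue;
    let syncing = client.major_syncing
        || queue.unverified_queue_size + queue.verified_queue_size > 0;
    if syncing {
        return Err("node is syncing; mining work is not ready".to_string());
    }
    Ok("<work package>".to_string())
}

fn main() {
    let client = Client {
        major_syncing: true,
        queue: QueueInfo { unverified_queue_size: 0, verified_queue_size: 0 },
    };
    assert!(get_work(&client).is_err());
}
```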

* Fix potential as_usize overflow when casting from U256 in miner (#9221)
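
A sketch of the pattern used by the fix, visible in the miner hunk further down: bound the `U256` by `u64::max_value()` before narrowing, instead of calling `as_usize()` on a value that may not fit. Assumes the `ethereum-types` crate; the function itself is illustrative.

```rust
extern crate ethereum_types;

use ethereum_types::U256;
use std::cmp;

// Clamp before narrowing: `as_usize()` on an oversized U256 can panic,
// so bound the quotient by u64::max_value() first, as the miner fix does.
fn max_transactions(gas_limit: U256, min_tx_gas: U256, base: usize) -> usize {
    if min_tx_gas.is_zero() {
        return usize::max_value();
    }
    let per_block = gas_limit / min_tx_gas;
    let clamped = cmp::min(per_block, U256::from(u64::max_value()));
    base.saturating_add(clamped.as_u64() as usize)
}

fn main() {
    // a pathological gas limit no longer panics (64-bit target assumed)
    let limit = max_transactions(U256::max_value(), U256::from(21_000), 8);
    assert_eq!(limit, usize::max_value());
}
```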

* Allow old blocks from peers with lower difficulty (#9226)

Previously we only allowed downloading of old blocks if the peer
difficulty was greater than our syncing difficulty. This change allows
downloading of blocks from peers whose difficulty is greater than that
of the last downloaded old block (sketched below).
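
The new condition, reduced to a standalone sketch; the real code compares `U256` total difficulties held in the sync state, and the names here are illustrative:

```rust
// Old rule: peer difficulty must exceed our syncing difficulty.
// New rule: peer difficulty must exceed the difficulty of the last
// imported old block, defaulting to "allow" when either is unknown.
fn may_request_old_blocks(
    force: bool,
    peer_difficulty: Option<u64>,
    last_imported_old_block_difficulty: Option<u64>,
) -> bool {
    force || last_imported_old_block_difficulty
        .map_or(true, |ld| peer_difficulty.map_or(true, |pd| pd > ld))
}

fn main() {
    // a peer below our head difficulty can still serve ancient blocks
    assert!(may_request_old_blocks(false, Some(100), Some(50)));
    assert!(!may_request_old_blocks(false, Some(40), Some(50)));
}
```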

* Update Dockerfile (#9242)

* Update Dockerfile

fix Docker build

* fix dockerfile paths: parity -> parity-ethereum (#9248)

* Update tobalaba.json (#9313)

* Light client `Provide default nonce in transactions when it's missing` (#9370)

* Provide `default_nonce` in txs when it's missing

When `nonce` is missing in an `EthTransaction`, the request would fail;
in these cases, provide the `default_nonce` value instead (see the sketch below).
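
A minimal sketch of the behaviour change, with a stripped-down request type standing in for the RPC call request; the real change lives in the light client's `proved_execution` helper near the end of the diff, and the field names here are illustrative:

```rust
// Stripped-down stand-ins for the RPC request and the filled transaction.
#[derive(Default)]
struct CallRequest {
    nonce: Option<u64>,
    gas: Option<u64>,
}

struct EthTransaction {
    nonce: u64,
    gas: u64,
}

const START_GAS: u64 = 50_000;

fn fill(req: CallRequest) -> EthTransaction {
    EthTransaction {
        // previously a missing nonce made the call fail; now it falls
        // back to the default (cf. `nonce.unwrap_or_default()` below)
        nonce: req.nonce.unwrap_or_default(),
        gas: req.gas.unwrap_or(START_GAS),
    }
}

fn main() {
    let tx = fill(CallRequest::default());
    assert_eq!((tx.nonce, tx.gas), (0, START_GAS));
}
```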

* Changed http:// to https:// on Yasm link (#9369)

Changed http:// to https:// on Yasm link in README.md

* Address grumbles

* ethcore: kovan: delay activation of strict score validation (#9406)

* Use impl Future in the light client RPC helpers (#8628) (sketched below)
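
The pattern this commit adopts, in a standalone sketch (futures 0.1 style, matching the codebase at the time): helpers return `impl Future` and use `future::Either` for early-exit paths instead of boxing every result.

```rust
extern crate futures; // futures 0.1

use futures::Future;
use futures::future::{self, Either};

// Two different concrete future types behind one `impl Future` return,
// with no `Box<Future<...>>` allocation.
fn fetch(fail_early: bool) -> impl Future<Item = u32, Error = String> {
    if fail_early {
        Either::A(future::err("unknown block".to_string()))
    } else {
        Either::B(future::ok(42))
    }
}

fn main() {
    assert_eq!(fetch(false).wait(), Ok(42));
    assert!(fetch(true).wait().is_err());
}
```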

* Better support for eth_getLogs in light mode (#9186) (sketched after the sub-items below)

* Light client on-demand request for headers range.

* Cache headers in HeaderWithAncestors response.

Also fulfills request locally if all headers are in cache.

* LightFetch::logs fetches missing headers on demand.

* LightFetch::logs limit the number of headers requested at a time.

* LightFetch::logs refactor header fetching logic.

* Enforce limit on header range length in light client logs request.

* Fix light request tests after struct change.

* Respond to review comments.
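
A sketch of the batching idea, assuming the `MAX_HEADERS_PER_REQUEST = 512` cap that this change exports from the provider; the helper itself is illustrative, not code from the diff:

```rust
// Split an inclusive block-number range into request-sized batches.
const MAX_HEADERS_PER_REQUEST: u64 = 512;

fn header_batches(from: u64, to: u64) -> Vec<(u64, u64)> {
    let mut batches = Vec::new();
    let mut start = from;
    while start <= to {
        let end = std::cmp::min(start + MAX_HEADERS_PER_REQUEST - 1, to);
        batches.push((start, end));
        start = end + 1;
    }
    batches
}

fn main() {
    // a 1000-block log filter needs two on-demand header requests
    assert_eq!(header_batches(0, 999), vec![(0, 511), (512, 999)]);
}
```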

* Propagate transactions for next 4 blocks. (#9265)

Closes #9255

This PR also removes the limit of max 64 transactions per packet; currently we only attempt to keep the packet size under 8MB. This will only matter for super-large transactions or high-block-gas-limit chains.

Patching this is important only for chains whose blocks can fit more than 4k transactions (over an 86M block gas limit).

For mainnet, we should actually see slightly faster propagation, since instead of computing the 4k pending set we only need `4 * 8M / 21k = 1523` transactions (see the sketch below).

Running some tests on a `dekompile` node right now to check how it performs in the wild.
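
The new cap, mirroring `Client::transactions_to_propagate` in the hunks further down (assumes the `ethereum-types` crate):

```rust
extern crate ethereum_types;

use ethereum_types::U256;
use std::cmp;

const PROPAGATE_FOR_BLOCKS: u32 = 4;
const MIN_TX_TO_PROPAGATE: usize = 256;

// How many ready transactions to hand to the propagator: enough to fill
// the next PROPAGATE_FOR_BLOCKS blocks, but at least MIN_TX_TO_PROPAGATE.
fn propagation_limit(block_gas_limit: U256, min_tx_gas: U256) -> usize {
    if min_tx_gas.is_zero() {
        return usize::max_value();
    }
    cmp::max(
        MIN_TX_TO_PROPAGATE,
        cmp::min(
            (block_gas_limit / min_tx_gas) * PROPAGATE_FOR_BLOCKS,
            // never more than u64 (and thus usize on 64-bit targets)
            U256::from(u64::max_value()),
        ).as_u64() as usize,
    )
}

fn main() {
    // mainnet-like numbers: 8M block gas limit, 21k gas per transaction.
    // 8M / 21k floors to 380, times 4 blocks = 1520, close to the
    // `4 * 8M / 21k = 1523` figure quoted above.
    let limit = propagation_limit(U256::from(8_000_000), U256::from(21_000));
    assert_eq!(limit, 1520);
}
```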

* ethcore: fix pow difficulty validation (#9328) (sketched after the sub-items below)

* ethcore: fix pow difficulty validation

* ethcore: validate difficulty is not zero

* ethcore: add issue link to regression test

* ethcore: fix tests

* ethcore: move difficulty_to_boundary to ethash crate

* ethcore: reuse difficulty_to_boundary and boundary_to_difficulty

* ethcore: fix grumbles in difficulty_to_boundary_aux
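
The substance of the fix, extracted into a standalone sketch: the old conversion computed `((1 << 255) / x) << 1`, which drops the least-significant bit of `2^256 / x`; the replacement divides in 512-bit space (see the ethash hunks below). Assumes the `ethereum-types` crate and a difficulty greater than one.

```rust
extern crate ethereum_types;

use ethereum_types::{U256, U512};

// Old conversion: the final left shift truncates the last bit.
fn boundary_old(d: U256) -> U256 {
    ((U256::one() << 255) / d) << 1
}

// Fixed conversion: 2^256 / d computed exactly in 512-bit space.
// (Zero and one are special-cased in the real code; assume d > 1 here.)
fn boundary_new(d: U256) -> U256 {
    U256::from((U512::one() << 256) / U512::from(d))
}

fn main() {
    let d = U256::from(3);
    // the old result ends in ...54, the correct one in ...55
    assert_eq!(boundary_new(d) - boundary_old(d), U256::one());
}
```
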
Commit 31720e6151 (parent 92776e4acf), authored by Afri Schoedon on 2018-08-30 19:59:01 +02:00, committed by GitHub.
35 changed files with 697 additions and 239 deletions.

Cargo.lock (generated)
@ -494,6 +494,7 @@ version = "1.11.0"
dependencies = [
"crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"keccak-hash 0.1.0",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1890,7 +1891,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1980,7 +1981,7 @@ dependencies = [
[[package]]
name = "parity"
version = "1.11.8"
version = "1.11.10"
dependencies = [
"ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2031,7 +2032,7 @@ dependencies = [
"parity-rpc 1.11.0",
"parity-rpc-client 1.4.0",
"parity-updater 1.11.0",
"parity-version 1.11.8",
"parity-version 1.11.10",
"parity-whisper 0.1.0",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"path 0.1.0",
@ -2079,7 +2080,7 @@ dependencies = [
"parity-reactor 0.1.0",
"parity-ui 1.11.0",
"parity-ui-deprecation 1.10.0",
"parity-version 1.11.8",
"parity-version 1.11.10",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"registrar 0.0.1",
@ -2221,9 +2222,10 @@ dependencies = [
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-reactor 0.1.0",
"parity-updater 1.11.0",
"parity-version 1.11.8",
"parity-version 1.11.10",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"patricia-trie 0.1.0",
"plain_hasher 0.1.0",
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.2.1",
@ -2342,7 +2344,7 @@ dependencies = [
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-hash-fetch 1.11.0",
"parity-version 1.11.8",
"parity-version 1.11.10",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"path 0.1.0",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2353,7 +2355,7 @@ dependencies = [
[[package]]
name = "parity-version"
version = "1.11.8"
version = "1.11.10"
dependencies = [
"ethcore-bytes 0.1.0",
"rlp 0.2.1",
@ -2691,7 +2693,7 @@ dependencies = [
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]

--- next file ---

@ -2,7 +2,7 @@
description = "Parity Ethereum client"
name = "parity"
# NOTE Make sure to update util/version/Cargo.toml as well
version = "1.11.8"
version = "1.11.10"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

--- next file ---

@ -36,13 +36,13 @@ RUN apt-get update && \
# show backtraces
RUST_BACKTRACE=1 && \
# build parity
cd /build&&git clone https://github.com/paritytech/parity && \
cd parity && \
cd /build&&git clone https://github.com/paritytech/parity-ethereum && \
cd parity-ethereum && \
git pull&& \
git checkout $BUILD_TAG && \
cargo build --verbose --release --features final && \
strip /build/parity/target/release/parity && \
file /build/parity/target/release/parity&&mkdir -p /parity&& cp /build/parity/target/release/parity /parity&&\
strip /build/parity-ethereum/target/release/parity && \
file /build/parity-ethereum/target/release/parity&&mkdir -p /parity&& cp /build/parity-ethereum/target/release/parity /parity&&\
#cleanup Docker image
rm -rf /root/.cargo&&rm -rf /root/.multirust&&rm -rf /root/.rustup&&rm -rf /build&&\
apt-get purge -y \

--- next file ---

@ -6,13 +6,14 @@ authors = ["Parity Technologies <admin@parity.io>"]
[lib]
[dependencies]
log = "0.3"
keccak-hash = { path = "../util/hash" }
primal = "0.2.3"
parking_lot = "0.5"
crunchy = "0.1.0"
memmap = "0.6"
either = "1.0.0"
ethereum-types = "0.3"
keccak-hash = { path = "../util/hash" }
log = "0.3"
memmap = "0.6"
parking_lot = "0.5"
primal = "0.2.3"
[dev-dependencies]
tempdir = "0.3"

--- next file ---

@ -16,10 +16,11 @@
#![cfg_attr(feature = "benches", feature(test))]
extern crate primal;
extern crate parking_lot;
extern crate either;
extern crate ethereum_types;
extern crate memmap;
extern crate parking_lot;
extern crate primal;
#[macro_use]
extern crate crunchy;
@ -38,6 +39,7 @@ mod shared;
pub use cache::{NodeCacheBuilder, OptimizeFor};
pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number};
use compute::Light;
use ethereum_types::{U256, U512};
use keccak::H256;
use parking_lot::Mutex;
pub use seed_compute::SeedHashCompute;
@ -136,6 +138,29 @@ impl EthashManager {
}
}
/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
pub fn boundary_to_difficulty(boundary: &ethereum_types::H256) -> U256 {
difficulty_to_boundary_aux(&**boundary)
}
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
pub fn difficulty_to_boundary(difficulty: &U256) -> ethereum_types::H256 {
difficulty_to_boundary_aux(difficulty).into()
}
fn difficulty_to_boundary_aux<T: Into<U512>>(difficulty: T) -> ethereum_types::U256 {
let difficulty = difficulty.into();
assert!(!difficulty.is_zero());
if difficulty == U512::one() {
U256::max_value()
} else {
// difficulty > 1, so result should never overflow 256 bits
U256::from((U512::one() << 256) / difficulty)
}
}
#[test]
fn test_lru() {
use tempdir::TempDir;
@ -155,6 +180,43 @@ fn test_lru() {
assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
}
#[test]
fn test_difficulty_to_boundary() {
use ethereum_types::H256;
use std::str::FromStr;
assert_eq!(difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value()));
assert_eq!(difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap());
}
#[test]
fn test_difficulty_to_boundary_regression() {
use ethereum_types::H256;
// the last bit was originally being truncated when performing the conversion
// https://github.com/paritytech/parity-ethereum/issues/8397
for difficulty in 1..9 {
assert_eq!(U256::from(difficulty), boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into())));
assert_eq!(H256::from(difficulty), difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into())));
assert_eq!(U256::from(difficulty), boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into()));
assert_eq!(H256::from(difficulty), difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into()));
}
}
#[test]
#[should_panic]
fn test_difficulty_to_boundary_panics_on_zero() {
difficulty_to_boundary(&U256::from(0));
}
#[test]
#[should_panic]
fn test_boundary_to_difficulty_panics_on_zero() {
boundary_to_difficulty(&ethereum_types::H256::from(0));
}
#[cfg(feature = "benches")]
mod benchmarks {
extern crate test;

--- next file ---

@ -43,7 +43,7 @@ pub mod provider;
mod types;
pub use self::cache::Cache;
pub use self::provider::Provider;
pub use self::provider::{Provider, MAX_HEADERS_PER_REQUEST};
pub use self::transaction_queue::TransactionQueue;
pub use types::request as request;

--- next file ---

@ -72,9 +72,6 @@ const PROPAGATE_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5);
const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
const RECALCULATE_COSTS_INTERVAL: Duration = Duration::from_secs(60 * 60);
/// Max number of transactions in a single packet.
const MAX_TRANSACTIONS_TO_PROPAGATE: usize = 64;
// minimum interval between updates.
const UPDATE_INTERVAL: Duration = Duration::from_millis(5000);
@ -651,7 +648,7 @@ impl LightProtocol {
fn propagate_transactions(&self, io: &IoContext) {
if self.capabilities.read().tx_relay { return }
let ready_transactions = self.provider.ready_transactions(MAX_TRANSACTIONS_TO_PROPAGATE);
let ready_transactions = self.provider.transactions_to_propagate();
if ready_transactions.is_empty() { return }
trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len());

--- next file ---

@ -173,8 +173,8 @@ impl Provider for TestProvider {
})
}
fn ready_transactions(&self, max_len: usize) -> Vec<PendingTransaction> {
self.0.client.ready_transactions(max_len)
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
self.0.client.transactions_to_propagate()
}
}

--- next file ---

@ -204,6 +204,8 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
caps.serve_headers = true,
CheckedRequest::HeaderByHash(_, _) =>
caps.serve_headers = true,
CheckedRequest::HeaderWithAncestors(_, _) =>
caps.serve_headers = true,
CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info.
CheckedRequest::Signal(_, _) =>
caps.serve_headers = true,

--- next file ---

@ -16,6 +16,7 @@
//! Request types, verification, and verification errors.
use std::cmp;
use std::sync::Arc;
use ethcore::basic_account::BasicAccount;
@ -48,6 +49,8 @@ pub enum Request {
HeaderProof(HeaderProof),
/// A request for a header by hash.
HeaderByHash(HeaderByHash),
/// A request for a header by hash with a range of its ancestors.
HeaderWithAncestors(HeaderWithAncestors),
/// A request for the index of a transaction.
TransactionIndex(TransactionIndex),
/// A request for block receipts.
@ -137,6 +140,7 @@ macro_rules! impl_single {
// implement traits for each kind of request.
impl_single!(HeaderProof, HeaderProof, (H256, U256));
impl_single!(HeaderByHash, HeaderByHash, encoded::Header);
impl_single!(HeaderWithAncestors, HeaderWithAncestors, Vec<encoded::Header>);
impl_single!(TransactionIndex, TransactionIndex, net_request::TransactionIndexResponse);
impl_single!(Receipts, BlockReceipts, Vec<Receipt>);
impl_single!(Body, Body, encoded::Block);
@ -247,6 +251,7 @@ impl From<encoded::Header> for HeaderRef {
pub enum CheckedRequest {
HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest),
HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest),
HeaderWithAncestors(HeaderWithAncestors, net_request::IncompleteHeadersRequest),
TransactionIndex(TransactionIndex, net_request::IncompleteTransactionIndexRequest),
Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest),
Body(Body, net_request::IncompleteBodyRequest),
@ -268,6 +273,16 @@ impl From<Request> for CheckedRequest {
};
CheckedRequest::HeaderByHash(req, net_req)
}
Request::HeaderWithAncestors(req) => {
let net_req = net_request::IncompleteHeadersRequest {
start: req.block_hash.map(Into::into),
skip: 0,
max: req.ancestor_count + 1,
reverse: true,
};
trace!(target: "on_demand", "HeaderWithAncestors Request, {:?}", net_req);
CheckedRequest::HeaderWithAncestors(req, net_req)
}
Request::HeaderProof(req) => {
let net_req = net_request::IncompleteHeaderProofRequest {
num: req.num().into(),
@ -336,6 +351,7 @@ impl CheckedRequest {
match self {
CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req),
CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req),
CheckedRequest::HeaderWithAncestors(_, req) => NetRequest::Headers(req),
CheckedRequest::TransactionIndex(_, req) => NetRequest::TransactionIndex(req),
CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req),
CheckedRequest::Body(_, req) => NetRequest::Body(req),
@ -391,6 +407,27 @@ impl CheckedRequest {
None
}
CheckedRequest::HeaderWithAncestors(_, ref req) => {
if req.skip != 1 || !req.reverse {
return None;
}
if let Some(&net_request::HashOrNumber::Hash(start)) = req.start.as_ref() {
let mut result = Vec::with_capacity(req.max as usize);
let mut hash = start;
let mut cache = cache.lock();
for _ in 0..req.max {
match cache.block_header(&hash) {
Some(header) => {
hash = header.parent_hash();
result.push(header);
}
None => return None,
}
}
Some(Response::HeaderWithAncestors(result))
} else { None }
}
CheckedRequest::Receipts(ref check, ref req) => {
// empty transactions -> no receipts
if check.0.as_ref().ok().map_or(false, |hdr| hdr.receipts_root() == KECCAK_NULL_RLP) {
@ -459,6 +496,7 @@ macro_rules! match_me {
match $me {
CheckedRequest::HeaderProof($check, $req) => $e,
CheckedRequest::HeaderByHash($check, $req) => $e,
CheckedRequest::HeaderWithAncestors($check, $req) => $e,
CheckedRequest::TransactionIndex($check, $req) => $e,
CheckedRequest::Receipts($check, $req) => $e,
CheckedRequest::Body($check, $req) => $e,
@ -488,6 +526,15 @@ impl IncompleteRequest for CheckedRequest {
_ => Ok(()),
}
}
CheckedRequest::HeaderWithAncestors(ref check, ref req) => {
req.check_outputs(&mut f)?;
// make sure the output given is definitively a hash.
match check.block_hash {
Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash),
_ => Ok(()),
}
}
CheckedRequest::TransactionIndex(_, ref req) => req.check_outputs(f),
CheckedRequest::Receipts(_, ref req) => req.check_outputs(f),
CheckedRequest::Body(_, ref req) => req.check_outputs(f),
@ -508,15 +555,46 @@ impl IncompleteRequest for CheckedRequest {
fn complete(self) -> Result<Self::Complete, net_request::NoSuchOutput> {
match self {
CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof),
CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers),
CheckedRequest::TransactionIndex(_, req) => req.complete().map(CompleteRequest::TransactionIndex),
CheckedRequest::Receipts(_, req) => req.complete().map(CompleteRequest::Receipts),
CheckedRequest::Body(_, req) => req.complete().map(CompleteRequest::Body),
CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account),
CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code),
CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution),
CheckedRequest::Signal(_, req) => req.complete().map(CompleteRequest::Signal),
CheckedRequest::HeaderProof(_, req) => {
trace!(target: "on_demand", "HeaderProof request completed {:?}", req);
req.complete().map(CompleteRequest::HeaderProof)
}
CheckedRequest::HeaderByHash(_, req) => {
trace!(target: "on_demand", "HeaderByHash request completed {:?}", req);
req.complete().map(CompleteRequest::Headers)
}
CheckedRequest::HeaderWithAncestors(_, req) => {
trace!(target: "on_demand", "HeaderWithAncestors request completed {:?}", req);
req.complete().map(CompleteRequest::Headers)
}
CheckedRequest::TransactionIndex(_, req) => {
trace!(target: "on_demand", "TransactionIndex request completed {:?}", req);
req.complete().map(CompleteRequest::TransactionIndex)
}
CheckedRequest::Receipts(_, req) => {
trace!(target: "on_demand", "Receipt request completed {:?}", req);
req.complete().map(CompleteRequest::Receipts)
}
CheckedRequest::Body(_, req) => {
trace!(target: "on_demand", "Block request completed {:?}", req);
req.complete().map(CompleteRequest::Body)
}
CheckedRequest::Account(_, req) => {
trace!(target: "on_demand", "Account request completed {:?}", req);
req.complete().map(CompleteRequest::Account)
}
CheckedRequest::Code(_, req) => {
trace!(target: "on_demand", "Code request completed {:?}", req);
req.complete().map(CompleteRequest::Code)
}
CheckedRequest::Execution(_, req) => {
trace!(target: "on_demand", "Execution request completed {:?}", req);
req.complete().map(CompleteRequest::Execution)
}
CheckedRequest::Signal(_, req) => {
trace!(target: "on_demand", "Signal request completed {:?}", req);
req.complete().map(CompleteRequest::Signal)
}
}
}
@ -553,6 +631,9 @@ impl net_request::CheckedRequest for CheckedRequest {
CheckedRequest::HeaderByHash(ref prover, _) =>
expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) =>
prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderByHash)),
CheckedRequest::HeaderWithAncestors(ref prover, _) =>
expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) =>
prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderWithAncestors)),
CheckedRequest::TransactionIndex(ref prover, _) =>
expect!((&NetResponse::TransactionIndex(ref res), _) =>
prover.check_response(cache, res).map(Response::TransactionIndex)),
@ -586,6 +667,8 @@ pub enum Response {
HeaderProof((H256, U256)),
/// Response to a header-by-hash request.
HeaderByHash(encoded::Header),
/// Response to a header-by-hash with ancestors request.
HeaderWithAncestors(Vec<encoded::Header>),
/// Response to a transaction-index request.
TransactionIndex(net_request::TransactionIndexResponse),
/// Response to a receipts request.
@ -627,6 +710,10 @@ pub enum Error {
Decoder(::rlp::DecoderError),
/// Empty response.
Empty,
/// Response data length exceeds request max.
TooManyResults(u64, u64),
/// Response data is incomplete.
TooFewResults(u64, u64),
/// Trie lookup error (result of bad proof)
Trie(TrieError),
/// Bad inclusion proof
@ -643,6 +730,8 @@ pub enum Error {
WrongTrieRoot(H256, H256),
/// Wrong response kind.
WrongKind,
/// Wrong sequence of headers.
WrongHeaderSequence,
}
impl From<::rlp::DecoderError> for Error {
@ -703,6 +792,65 @@ impl HeaderProof {
}
}
/// Request for a header by hash with a range of ancestors.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HeaderWithAncestors {
/// Hash of the last block in the range to fetch.
pub block_hash: Field<H256>,
/// Number of headers before the last block to fetch in addition.
pub ancestor_count: u64,
}
impl HeaderWithAncestors {
/// Check a response for the headers.
pub fn check_response(
&self,
cache: &Mutex<::cache::Cache>,
start: &net_request::HashOrNumber,
headers: &[encoded::Header]
) -> Result<Vec<encoded::Header>, Error> {
let expected_hash = match (self.block_hash, start) {
(Field::Scalar(ref h), &net_request::HashOrNumber::Hash(ref h2)) => {
if h != h2 { return Err(Error::WrongHash(*h, *h2)) }
*h
}
(_, &net_request::HashOrNumber::Hash(h2)) => h2,
_ => return Err(Error::HeaderByNumber),
};
let start_header = headers.first().ok_or(Error::Empty)?;
let start_hash = start_header.hash();
if start_hash != expected_hash {
return Err(Error::WrongHash(expected_hash, start_hash));
}
let expected_len = 1 + cmp::min(self.ancestor_count, start_header.number());
let actual_len = headers.len() as u64;
match actual_len.cmp(&expected_len) {
cmp::Ordering::Less =>
return Err(Error::TooFewResults(expected_len, actual_len)),
cmp::Ordering::Greater =>
return Err(Error::TooManyResults(expected_len, actual_len)),
cmp::Ordering::Equal => (),
};
for (header, prev_header) in headers.iter().zip(headers[1..].iter()) {
if header.number() != prev_header.number() + 1 ||
header.parent_hash() != prev_header.hash()
{
return Err(Error::WrongHeaderSequence)
}
}
let mut cache = cache.lock();
for header in headers {
cache.insert_block_header(header.hash(), header.clone());
}
Ok(headers.to_vec())
}
}
/// Request for a header by hash.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HeaderByHash(pub Field<H256>);
@ -994,6 +1142,83 @@ mod tests {
assert!(HeaderByHash(hash.into()).check_response(&cache, &hash.into(), &[raw_header]).is_ok())
}
#[test]
fn check_header_with_ancestors() {
let mut last_header_hash = H256::default();
let mut headers = (0..11).map(|num| {
let mut header = Header::new();
header.set_number(num);
header.set_parent_hash(last_header_hash);
last_header_hash = header.hash();
header
}).collect::<Vec<_>>();
headers.reverse(); // because responses are in reverse order
let raw_headers = headers.iter()
.map(|hdr| encoded::Header::new(::rlp::encode(hdr).into_vec()))
.collect::<Vec<_>>();
let mut invalid_successor = Header::new();
invalid_successor.set_number(11);
invalid_successor.set_parent_hash(headers[1].hash());
let raw_invalid_successor = encoded::Header::new(::rlp::encode(&invalid_successor).into_vec());
let cache = Mutex::new(make_cache());
let header_with_ancestors = |hash, count| {
HeaderWithAncestors {
block_hash: hash,
ancestor_count: count
}
};
// Correct responses
assert!(header_with_ancestors(headers[0].hash().into(), 0)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..1]).is_ok());
assert!(header_with_ancestors(headers[0].hash().into(), 2)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..3]).is_ok());
assert!(header_with_ancestors(headers[0].hash().into(), 10)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..11]).is_ok());
assert!(header_with_ancestors(headers[2].hash().into(), 2)
.check_response(&cache, &headers[2].hash().into(), &raw_headers[2..5]).is_ok());
assert!(header_with_ancestors(headers[2].hash().into(), 10)
.check_response(&cache, &headers[2].hash().into(), &raw_headers[2..11]).is_ok());
assert!(header_with_ancestors(invalid_successor.hash().into(), 0)
.check_response(&cache, &invalid_successor.hash().into(), &[raw_invalid_successor.clone()]).is_ok());
// Incorrect responses
assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 0)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..1]),
Err(Error::WrongHash(invalid_successor.hash(), headers[0].hash())));
assert_eq!(header_with_ancestors(headers[0].hash().into(), 0)
.check_response(&cache, &headers[0].hash().into(), &[]),
Err(Error::Empty));
assert_eq!(header_with_ancestors(headers[0].hash().into(), 10)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..10]),
Err(Error::TooFewResults(11, 10)));
assert_eq!(header_with_ancestors(headers[0].hash().into(), 9)
.check_response(&cache, &headers[0].hash().into(), &raw_headers[0..11]),
Err(Error::TooManyResults(10, 11)));
let response = &[raw_headers[0].clone(), raw_headers[2].clone()];
assert_eq!(header_with_ancestors(headers[0].hash().into(), 1)
.check_response(&cache, &headers[0].hash().into(), response),
Err(Error::WrongHeaderSequence));
let response = &[raw_invalid_successor.clone(), raw_headers[0].clone()];
assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 1)
.check_response(&cache, &invalid_successor.hash().into(), response),
Err(Error::WrongHeaderSequence));
let response = &[raw_invalid_successor.clone(), raw_headers[1].clone()];
assert_eq!(header_with_ancestors(invalid_successor.hash().into(), 1)
.check_response(&cache, &invalid_successor.hash().into(), response),
Err(Error::WrongHeaderSequence));
}
#[test]
fn check_body() {
use rlp::RlpStream;

--- next file ---

@ -33,6 +33,9 @@ use transaction_queue::TransactionQueue;
use request;
/// Maximum allowed size of a headers request.
pub const MAX_HEADERS_PER_REQUEST: u64 = 512;
/// Defines the operations that a provider for the light subprotocol must fulfill.
pub trait Provider: Send + Sync {
/// Provide current blockchain info.
@ -54,7 +57,6 @@ pub trait Provider: Send + Sync {
/// results within must adhere to the `skip` and `reverse` parameters.
fn block_headers(&self, req: request::CompleteHeadersRequest) -> Option<request::HeadersResponse> {
use request::HashOrNumber;
const MAX_HEADERS_TO_SEND: u64 = 512;
if req.max == 0 { return None }
@ -83,7 +85,7 @@ pub trait Provider: Send + Sync {
}
};
let max = ::std::cmp::min(MAX_HEADERS_TO_SEND, req.max);
let max = ::std::cmp::min(MAX_HEADERS_PER_REQUEST, req.max);
let headers: Vec<_> = (0u64..max)
.map(|x: u64| x.saturating_mul(req.skip.saturating_add(1)))
@ -128,7 +130,7 @@ pub trait Provider: Send + Sync {
fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option<request::HeaderProofResponse>;
/// Provide pending transactions.
fn ready_transactions(&self, max_len: usize) -> Vec<PendingTransaction>;
fn transactions_to_propagate(&self) -> Vec<PendingTransaction>;
/// Provide a proof-of-execution for the given transaction proof request.
/// Returns a vector of all state items necessary to execute the transaction.
@ -283,8 +285,8 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
.map(|(_, proof)| ::request::ExecutionResponse { items: proof })
}
fn ready_transactions(&self, max_len: usize) -> Vec<PendingTransaction> {
BlockChainClient::ready_transactions(self, max_len)
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
BlockChainClient::transactions_to_propagate(self)
.into_iter()
.map(|tx| tx.pending().clone())
.collect()
@ -370,12 +372,10 @@ impl<L: AsLightClient + Send + Sync> Provider for LightProvider<L> {
None
}
fn ready_transactions(&self, max_len: usize) -> Vec<PendingTransaction> {
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
let chain_info = self.chain_info();
let mut transactions = self.txqueue.read()
.ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp);
transactions.truncate(max_len);
transactions
self.txqueue.read()
.ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp)
}
}

--- next file ---

@ -19,7 +19,7 @@
"0x00a0a24b9f0e5ec7aa4c7389b8302fd0123194de"
]
},
"validateScoreTransition": 1000000,
"validateScoreTransition": 4301764,
"validateStepTransition": 1500000,
"maximumUncleCountTransition": 5067000,
"maximumUncleCount": 0

--- next file ---

@ -16,7 +16,16 @@
"gasLimitBoundDivisor": "0x400",
"minGasLimit": "0x1388",
"networkID": "0x62121",
"wasmActivationTransition": 4000000
"wasmActivationTransition": 6666666,
"eip140Transition": 6666666,
"eip211Transition": 6666666,
"eip214Transition": 6666666,
"eip658Transition": 6666666,
"maxCodeSize": 24576,
"maxCodeSizeTransition": 6666666,
"registrar": "0xb8624dc8cb3ca3147c178ac4c21734eb49e04071"
},
"genesis": {
"seal": {
@ -43,12 +52,22 @@
},
"0x4ba15b56452521c0826a35a6f2022e1210fc519b": {
"balance": "0x7E37BE2022B2B09472D89C0000"
}
},
"0x0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "activate_at": 6666666, "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0x0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "activate_at": 6666666, "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0x0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "activate_at": 6666666, "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0x0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "activate_at": 6666666, "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0x0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 6666666, "pricing": { "modexp": { "divisor": 20 } } } },
"0x0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 6666666, "pricing": { "linear": { "base": 500, "word": 0 } } } },
"0x0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 6666666, "pricing": { "linear": { "base": 40000, "word": 0 } } } },
"0x0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 6666666, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } }
},
"nodes": [
"enode://147573f46fe9f5cc38fbe070089a31390baec5dd2827c8f2ef168833e4d0254fbee3969a02c5b9910ea5d5b23d86a6ed5eabcda17cc12007b7d9178b6c697aa5@37.120.168.56:30303",
"enode://a370d5fd55959f20af6d1565b151a760c1372f5a2aaf674d4892cd4fd2de0d1f672781cd40e0d4e4b51c5823527ddec73b31cc14ac685449d9f0866996a16b9f@13.76.165.180:30303",
"enode://da019fa5fb1fda105100d68a986938ec15ac5c6ff69d6e4ad3e350e377057f3e67e33aea5feb22d5cdcfc22041d141c8453c77baa64a216fff98f191ca76b3ec@18.220.108.238:30303",
"enode://49498fb8cdcd79c813ccdaa9496a3a4be0a187a3183e99adbc04d9c90b9a62ad59f0b6832f6e43b48e63fbebf74ec5438eb0d6d9098330edf36413d276fedf81@13.80.148.117:30303"
"enode://eda34244538d72f42605a6fc8b8a34b15714c683989e8b29dc9e7a2b2088da490a5b32f2c149bec5a5c482bf03ec2c4f38b833ae31e36fcb26fb05fd094b2a88@18.197.33.9:30303",
"enode://12e903e900137b02b22e01f7918bd6e7310773c313e4e577281f35597e394a3e0b54c7314a8970a9776c5a3e5dc4daee289215dea3897bcb6d5cf0bb1dd2d356@18.197.31.231:30303",
"enode://423fdb91b37ec0714af0c19f625ec4af3ada2844367a36e45a05703577a84f7f0e9483585d4950a35c9e3738dba8c6abd7e1ce278d9a1f3f28065bc009f409cd@52.221.203.209:30303",
"enode://a9327d37d07799817d4a3e13d49fb4f5cc1486d4adf3ec8a6b98be62c4d7a5453914a5139dbe124809a388514cb0be37f9fa799539abe2250672f6d3d778b821@18.191.209.251:30303"
]
}

--- next file ---

@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::cmp;
use std::collections::{HashSet, BTreeMap, BTreeSet, VecDeque};
use std::fmt;
use std::str::FromStr;
@ -1893,7 +1894,25 @@ impl BlockChainClient for Client {
(*self.build_last_hashes(&self.chain.read().best_block_hash())).clone()
}
fn ready_transactions(&self, max_len: usize) -> Vec<Arc<VerifiedTransaction>> {
fn transactions_to_propagate(&self) -> Vec<Arc<VerifiedTransaction>> {
const PROPAGATE_FOR_BLOCKS: u32 = 4;
const MIN_TX_TO_PROPAGATE: usize = 256;
let block_gas_limit = *self.best_block_header().gas_limit();
let min_tx_gas: U256 = self.latest_schedule().tx_gas.into();
let max_len = if min_tx_gas.is_zero() {
usize::max_value()
} else {
cmp::max(
MIN_TX_TO_PROPAGATE,
cmp::min(
(block_gas_limit / min_tx_gas) * PROPAGATE_FOR_BLOCKS,
// never more than usize
usize::max_value().into()
).as_u64() as usize
)
};
self.importer.miner.ready_transactions(self, max_len, ::miner::PendingOrdering::Priority)
}

--- next file ---

@ -806,8 +806,8 @@ impl BlockChainClient for TestBlockChainClient {
self.traces.read().clone()
}
fn ready_transactions(&self, max_len: usize) -> Vec<Arc<VerifiedTransaction>> {
self.miner.ready_transactions(self, max_len, miner::PendingOrdering::Priority)
fn transactions_to_propagate(&self) -> Vec<Arc<VerifiedTransaction>> {
self.miner.ready_transactions(self, 4096, miner::PendingOrdering::Priority)
}
fn signing_chain_id(&self) -> Option<u64> { None }

--- next file ---

@ -320,8 +320,8 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra
/// Get last hashes starting from best block.
fn last_hashes(&self) -> LastHashes;
/// List all transactions that are allowed into the next block.
fn ready_transactions(&self, max_len: usize) -> Vec<Arc<VerifiedTransaction>>;
/// List all ready transactions that should be propagated to other peers.
fn transactions_to_propagate(&self) -> Vec<Arc<VerifiedTransaction>>;
/// Sorted list of transaction gas prices from at least last sample_size blocks.
fn gas_price_corpus(&self, sample_size: usize) -> ::stats::Corpus<U256> {

--- next file ---

@ -19,7 +19,7 @@ use std::cmp;
use std::collections::BTreeMap;
use std::sync::Arc;
use hash::{KECCAK_EMPTY_LIST_RLP};
use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
use ethash::{self, quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
use ethereum_types::{H256, H64, U256, Address};
use unexpected::{OutOfBounds, Mismatch};
use block::*;
@ -311,7 +311,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty().clone() })))
}
let difficulty = Ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
let difficulty = ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
&header.bare_hash().0,
seal.nonce.low_u64(),
&seal.mix_hash.0
@ -333,7 +333,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, seal.nonce.low_u64());
let mix = H256(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
let difficulty = ethash::boundary_to_difficulty(&H256(result.value));
trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}",
num = header.number() as u64,
seed = H256(slow_hash_block_number(header.number() as u64)),
@ -452,25 +452,6 @@ impl Ethash {
}
target
}
/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
pub fn boundary_to_difficulty(boundary: &H256) -> U256 {
let d = U256::from(*boundary);
if d <= U256::one() {
U256::max_value()
} else {
((U256::one() << 255) / d) << 1
}
}
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
pub fn difficulty_to_boundary(difficulty: &U256) -> H256 {
if *difficulty <= U256::one() {
U256::max_value().into()
} else {
(((U256::one() << 255) / *difficulty) << 1).into()
}
}
}
fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) {
@ -771,16 +752,6 @@ mod tests {
}
}
#[test]
fn test_difficulty_to_boundary() {
// result of f(0) is undefined, so do not assert the result
let _ = Ethash::difficulty_to_boundary(&U256::from(0));
assert_eq!(Ethash::difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value()));
assert_eq!(Ethash::difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(Ethash::difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap());
assert_eq!(Ethash::difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap());
}
#[test]
fn difficulty_frontier() {
let machine = new_homestead_test_machine();

--- next file ---

@ -391,7 +391,7 @@ impl Miner {
let max_transactions = if min_tx_gas.is_zero() {
usize::max_value()
} else {
MAX_SKIPPED_TRANSACTIONS.saturating_add((*open_block.block().header().gas_limit() / min_tx_gas).as_u64() as usize)
MAX_SKIPPED_TRANSACTIONS.saturating_add(cmp::min(*open_block.block().header().gas_limit() / min_tx_gas, u64::max_value().into()).as_u64() as usize)
};
let pending: Vec<Arc<_>> = self.transaction_queue.pending(

--- next file ---

@ -23,7 +23,7 @@ use std::fmt;
use client::{Client, ImportSealedBlock};
use ethereum_types::{H64, H256, clean_0x, U256};
use ethereum::ethash::Ethash;
use ethash::SeedHashCompute;
use ethash::{self, SeedHashCompute};
use ethcore_miner::work_notify::NotifyWork;
use ethcore_stratum::{
JobDispatcher, PushWorkHandler,
@ -166,7 +166,7 @@ impl StratumJobDispatcher {
/// Serializes payload for stratum service
fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String {
// TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty);
let target = ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
format!(

--- next file ---

@ -345,11 +345,11 @@ fn does_not_propagate_delayed_transactions() {
client.miner().import_own_transaction(&*client, tx0).unwrap();
client.miner().import_own_transaction(&*client, tx1).unwrap();
assert_eq!(0, client.ready_transactions(10).len());
assert_eq!(0, client.transactions_to_propagate().len());
assert_eq!(0, client.miner().ready_transactions(&*client, 10, PendingOrdering::Priority).len());
push_blocks_to_client(&client, 53, 2, 2);
client.flush_queue();
assert_eq!(2, client.ready_transactions(10).len());
assert_eq!(2, client.transactions_to_propagate().len());
assert_eq!(2, client.miner().ready_transactions(&*client, 10, PendingOrdering::Priority).len());
}

--- next file ---

@ -149,12 +149,6 @@ const MAX_NEW_HASHES: usize = 64;
const MAX_NEW_BLOCK_AGE: BlockNumber = 20;
// maximal packet size with transactions (cannot be greater than 16MB - protocol limitation).
const MAX_TRANSACTION_PACKET_SIZE: usize = 8 * 1024 * 1024;
// Maximal number of transactions queried from miner to propagate.
// This set is used to diff with transactions known by the peer and
// we will send a difference of length up to `MAX_TRANSACTIONS_TO_PROPAGATE`.
const MAX_TRANSACTIONS_TO_QUERY: usize = 4096;
// Maximal number of transactions in sent in single packet.
const MAX_TRANSACTIONS_TO_PROPAGATE: usize = 64;
// Min number of blocks to be behind for a snapshot sync
const SNAPSHOT_RESTORE_THRESHOLD: BlockNumber = 30000;
const SNAPSHOT_MIN_PEERS: usize = 3;
@ -761,14 +755,24 @@ impl ChainSync {
}
}
// Only ask for old blocks if the peer has a higher difficulty
if force || higher_difficulty {
// Only ask for old blocks if the peer has a higher difficulty than the last imported old block
let last_imported_old_block_difficulty = self.old_blocks.as_mut().and_then(|d| {
io.chain().block_total_difficulty(BlockId::Number(d.last_imported_block_number()))
});
if force || last_imported_old_block_difficulty.map_or(true, |ld| peer_difficulty.map_or(true, |pd| pd > ld)) {
if let Some(request) = self.old_blocks.as_mut().and_then(|d| d.request_blocks(io, num_active_peers)) {
SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::OldBlocks);
return;
}
} else {
trace!(target: "sync", "peer {} is not suitable for asking old blocks", peer_id);
trace!(
target: "sync",
"peer {:?} is not suitable for requesting old blocks, last_imported_old_block_difficulty={:?}, peer_difficulty={:?}",
peer_id,
last_imported_old_block_difficulty,
peer_difficulty
);
self.deactivate_peer(io, peer_id);
}
},

--- next file ---

@ -29,11 +29,9 @@ use transaction::SignedTransaction;
use super::{
random,
ChainSync,
MAX_TRANSACTION_PACKET_SIZE,
MAX_PEER_LAG_PROPAGATION,
MAX_PEERS_PROPAGATION,
MAX_TRANSACTION_PACKET_SIZE,
MAX_TRANSACTIONS_TO_PROPAGATE,
MAX_TRANSACTIONS_TO_QUERY,
MIN_PEERS_PROPAGATION,
CONSENSUS_DATA_PACKET,
NEW_BLOCK_HASHES_PACKET,
@ -115,7 +113,7 @@ impl SyncPropagator {
return 0;
}
let transactions = io.chain().ready_transactions(MAX_TRANSACTIONS_TO_QUERY);
let transactions = io.chain().transactions_to_propagate();
if transactions.is_empty() {
return 0;
}
@ -178,7 +176,6 @@ impl SyncPropagator {
// Get hashes of all transactions to send to this peer
let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions)
.take(MAX_TRANSACTIONS_TO_PROPAGATE)
.cloned()
.collect::<HashSet<_>>();
if to_send.is_empty() {

--- next file ---

@ -24,6 +24,7 @@ use hash::Address;
pub struct EthashParams {
/// See main EthashParams docs.
#[serde(rename="minimumDifficulty")]
#[serde(deserialize_with="uint::validate_non_zero")]
pub minimum_difficulty: Uint,
/// See main EthashParams docs.
#[serde(rename="difficultyBoundDivisor")]

--- next file ---

@ -462,7 +462,7 @@
<key>OVERWRITE_PERMISSIONS</key>
<false/>
<key>VERSION</key>
<string>1.11.8</string>
<string>1.11.10</string>
</dict>
<key>UUID</key>
<string>2DCD5B81-7BAF-4DA1-9251-6274B089FD36</string>

--- next file ---

@ -67,19 +67,10 @@ impl WorkPoster {
}
}
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
fn difficulty_to_boundary(difficulty: &U256) -> H256 {
if *difficulty <= U256::one() {
U256::max_value().into()
} else {
(((U256::one() << 255) / *difficulty) << 1).into()
}
}
impl NotifyWork for WorkPoster {
fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) {
// TODO: move this to engine
let target = difficulty_to_boundary(&difficulty);
let target = ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
let body = format!(

--- next file ---

@ -10,7 +10,7 @@
!define DESCRIPTION "Fast, light, robust Ethereum implementation"
!define VERSIONMAJOR 1
!define VERSIONMINOR 11
!define VERSIONBUILD 8
!define VERSIONBUILD 10
!define ARGS ""
!define FIRST_START_ARGS "--mode=passive ui"

--- next file ---

@ -61,6 +61,7 @@ parity-reactor = { path = "../util/reactor" }
parity-updater = { path = "../updater" }
parity-version = { path = "../util/version" }
patricia-trie = { path = "../util/patricia_trie" }
plain_hasher = { path = "../util/plain_hasher" }
rlp = { path = "../util/rlp" }
stats = { path = "../util/stats" }
vm = { path = "../ethcore/vm" }

--- next file ---

@ -70,6 +70,7 @@ extern crate stats;
extern crate keccak_hash as hash;
extern crate hardware_wallet;
extern crate patricia_trie as trie;
extern crate plain_hasher;
#[macro_use]
extern crate log;

--- next file ---

@ -19,6 +19,7 @@
use std::fmt;
use ethcore::account_provider::{SignError as AccountError};
use ethcore::client::BlockId;
use ethcore::error::{Error as EthcoreError, ErrorKind, CallError};
use jsonrpc_core::{futures, Error, ErrorCode, Value};
use rlp::DecoderError;
@ -108,6 +109,14 @@ pub fn request_rejected_limit() -> Error {
}
}
pub fn request_rejected_param_limit(limit: u64, items_desc: &str) -> Error {
Error {
code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT),
message: format!("Requested data size exceeds limit of {} {}.", limit, items_desc),
data: None,
}
}
pub fn account<T: fmt::Debug>(error: &str, details: T) -> Error {
Error {
code: ErrorCode::ServerError(codes::ACCOUNT_ERROR),
@ -438,6 +447,19 @@ pub fn filter_not_found() -> Error {
}
}
pub fn filter_block_not_found(id: BlockId) -> Error {
Error {
code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), // Specified in EIP-234.
message: "One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found".into(),
data: Some(Value::String(match id {
BlockId::Hash(hash) => format!("0x{:x}", hash),
BlockId::Number(number) => format!("0x{:x}", number),
BlockId::Earliest => "earliest".to_string(),
BlockId::Latest => "latest".to_string(),
})),
}
}
// on-demand sender cancelled.
pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error {
internal("on-demand sender cancelled", "")

--- next file ---

@ -16,36 +16,40 @@
//! Helpers for fetching blockchain data either from the light client or the network.
use std::cmp;
use std::sync::Arc;
use ethcore::basic_account::BasicAccount;
use ethcore::encoded;
use ethcore::executed::{Executed, ExecutionError};
use ethcore::ids::BlockId;
use ethcore::filter::Filter as EthcoreFilter;
use ethcore::receipt::Receipt;
use jsonrpc_core::{BoxFuture, Result};
use jsonrpc_core::{Result, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
use jsonrpc_macros::Trailing;
use light::cache::Cache;
use light::client::LightChainClient;
use light::cht;
use light::on_demand::{request, OnDemand, HeaderRef, Request as OnDemandRequest, Response as OnDemandResponse};
use light::{cht, MAX_HEADERS_PER_REQUEST};
use light::on_demand::{
request, OnDemand, HeaderRef, Request as OnDemandRequest,
Response as OnDemandResponse, ExecutionResult,
};
use light::request::Field;
use sync::LightSync;
use ethereum_types::{U256, Address};
use hash::H256;
use parking_lot::Mutex;
use plain_hasher::H256FastMap;
use transaction::{Action, Transaction as EthTransaction, SignedTransaction, LocalizedTransaction};
use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch};
use v1::types::{BlockNumber, CallRequest, Log, Transaction};
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
const NO_INVALID_BACK_REFS: &str = "Fails only on invalid back-references; back-references here known to be valid; qed";
/// Helper for fetching blockchain data either from the light client or the network
/// as necessary.
@ -86,10 +90,6 @@ pub fn extract_transaction_at_index(block: encoded::Block, index: usize, eip86_t
.map(|tx| Transaction::from_localized(tx, eip86_transition))
}
/// Type alias for convenience.
pub type ExecutionResult = ::std::result::Result<Executed, ExecutionError>;
// extract the header indicated by the given `HeaderRef` from the given responses.
// fails only if they do not correspond.
fn extract_header(res: &[OnDemandResponse], header: HeaderRef) -> Option<encoded::Header> {
@ -138,58 +138,57 @@ impl LightFetch {
}
/// Get a block header from the on demand service or client, or error.
pub fn header(&self, id: BlockId) -> BoxFuture<encoded::Header> {
pub fn header(&self, id: BlockId) -> impl Future<Item = encoded::Header, Error = Error> + Send {
let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r,
Err(e) => return Box::new(future::err(e)),
Err(e) => return Either::A(future::err(e)),
};
self.send_requests(reqs, |res|
Either::B(self.send_requests(reqs, |res|
extract_header(&res, header_ref)
.expect("these responses correspond to requests that header_ref belongs to \
therefore it will not fail; qed")
)
))
}
/// Helper for getting contract code at a given block.
pub fn code(&self, address: Address, id: BlockId) -> BoxFuture<Vec<u8>> {
pub fn code(&self, address: Address, id: BlockId) -> impl Future<Item = Vec<u8>, Error = Error> + Send {
let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r,
Err(e) => return Box::new(future::err(e)),
Err(e) => return Either::A(future::err(e)),
};
reqs.push(request::Account { header: header_ref.clone(), address: address }.into());
let account_idx = reqs.len() - 1;
reqs.push(request::Code { header: header_ref, code_hash: Field::back_ref(account_idx, 0) }.into());
self.send_requests(reqs, |mut res| match res.pop() {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Code(code)) => code,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
})
}))
}
/// Helper for getting account info at a given block.
/// `None` indicates the account doesn't exist at the given block.
pub fn account(&self, address: Address, id: BlockId) -> BoxFuture<Option<BasicAccount>> {
pub fn account(&self, address: Address, id: BlockId) -> impl Future<Item = Option<BasicAccount>, Error = Error> + Send {
let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r,
Err(e) => return Box::new(future::err(e)),
Err(e) => return Either::A(future::err(e)),
};
reqs.push(request::Account { header: header_ref, address: address }.into());
self.send_requests(reqs, |mut res|match res.pop() {
Either::B(self.send_requests(reqs, |mut res|match res.pop() {
Some(OnDemandResponse::Account(acc)) => acc,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
})
}))
}
/// Helper for getting proved execution.
pub fn proved_execution(&self, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<ExecutionResult> {
pub fn proved_execution(&self, req: CallRequest, num: Trailing<BlockNumber>) -> impl Future<Item = ExecutionResult, Error = Error> + Send {
const DEFAULT_GAS_PRICE: u64 = 21_000;
// starting gas when gas not provided.
const START_GAS: u64 = 50_000;
@ -210,7 +209,7 @@ impl LightFetch {
}
};
let from = req.from.unwrap_or(Address::zero());
let from = req.from.unwrap_or_else(|| Address::zero());
let nonce_fut = match req.nonce {
Some(nonce) => Either::A(future::ok(Some(nonce))),
None => Either::B(self.account(from, id).map(|acc| acc.map(|a| a.nonce))),
@ -235,29 +234,16 @@ impl LightFetch {
// fetch missing transaction fields from the network.
Box::new(nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| {
let action = req.to.map_or(Action::Create, Action::Call);
let value = req.value.unwrap_or_else(U256::zero);
let data = req.data.unwrap_or_default();
future::done(match (nonce, req.gas) {
(Some(n), Some(gas)) => Ok((true, EthTransaction {
nonce: n,
action: action,
gas: gas,
gas_price: gas_price,
value: value,
data: data,
})),
(Some(n), None) => Ok((false, EthTransaction {
nonce: n,
action: action,
gas: START_GAS.into(),
gas_price: gas_price,
value: value,
data: data,
})),
(None, _) => Err(errors::unknown_block()),
})
future::done(
Ok((req.gas.is_some(), EthTransaction {
nonce: nonce.unwrap_or_default(),
action: req.to.map_or(Action::Create, Action::Call),
gas: req.gas.unwrap_or_else(|| START_GAS.into()),
gas_price,
value: req.value.unwrap_or_else(U256::zero),
data: req.data.unwrap_or_default(),
}))
)
}).join(header_fut).and_then(move |((gas_known, tx), hdr)| {
// then request proved execution.
// TODO: get last-hashes from network.
@ -279,97 +265,109 @@ impl LightFetch {
}
/// Get a block itself. Fails on unknown block ID.
pub fn block(&self, id: BlockId) -> BoxFuture<encoded::Block> {
pub fn block(&self, id: BlockId) -> impl Future<Item = encoded::Block, Error = Error> + Send {
let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r,
Err(e) => return Box::new(future::err(e)),
Err(e) => return Either::A(future::err(e)),
};
reqs.push(request::Body(header_ref).into());
self.send_requests(reqs, |mut res| match res.pop() {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Body(b)) => b,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
})
}))
}
/// Get the block receipts. Fails on unknown block ID.
pub fn receipts(&self, id: BlockId) -> BoxFuture<Vec<Receipt>> {
pub fn receipts(&self, id: BlockId) -> impl Future<Item = Vec<Receipt>, Error = Error> + Send {
let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r,
Err(e) => return Box::new(future::err(e)),
Err(e) => return Either::A(future::err(e)),
};
reqs.push(request::BlockReceipts(header_ref).into());
self.send_requests(reqs, |mut res| match res.pop() {
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
Some(OnDemandResponse::Receipts(b)) => b,
_ => panic!("responses correspond directly with requests in amount and type; qed"),
})
}))
}
/// Get transaction logs
pub fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>> {
pub fn logs(&self, filter: EthcoreFilter) -> impl Future<Item = Vec<Log>, Error = Error> + Send {
use std::collections::BTreeMap;
use jsonrpc_core::futures::stream::{self, Stream};
// early exit for "to" block before "from" block.
let best_number = self.client.chain_info().best_block_number;
let block_number = |id| match id {
BlockId::Earliest => Some(0),
BlockId::Latest => Some(best_number),
BlockId::Hash(h) => self.client.block_header(BlockId::Hash(h)).map(|hdr| hdr.number()),
BlockId::Number(x) => Some(x),
};
const MAX_BLOCK_RANGE: u64 = 1000;
match (block_number(filter.to_block), block_number(filter.from_block)) {
(Some(to), Some(from)) if to < from => return Box::new(future::ok(Vec::new())),
(Some(_), Some(_)) => {},
_ => return Box::new(future::err(errors::unknown_block())),
}
let fetcher = self.clone();
self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE)
.and_then(move |mut headers| {
if headers.is_empty() {
return Either::A(future::ok(Vec::new()));
}
let maybe_future = self.sync.with_context(move |ctx| {
// find all headers which match the filter, and fetch the receipts for each one.
// match them with their numbers for easy sorting later.
let bit_combos = filter.bloom_possibilities();
let receipts_futures: Vec<_> = self.client.ancestry_iter(filter.to_block)
.take_while(|ref hdr| BlockId::Number(hdr.number()) != filter.from_block)
.take_while(|ref hdr| BlockId::Hash(hdr.hash()) != filter.from_block)
.filter(|ref hdr| {
let hdr_bloom = hdr.log_bloom();
bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some()
})
.map(|hdr| (hdr.number(), request::BlockReceipts(hdr.into())))
.map(|(num, req)| self.on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, x)))
.collect();
let on_demand = &fetcher.on_demand;
// as the receipts come in, find logs within them which match the filter.
// insert them into a BTreeMap to maintain order by number and block index.
stream::futures_unordered(receipts_futures)
.fold(BTreeMap::new(), move |mut matches, (num, receipts)| {
for (block_index, log) in receipts.into_iter().flat_map(|r| r.logs).enumerate() {
if filter.matches(&log) {
matches.insert((num, block_index), log.into());
}
}
future::ok(matches)
}) // and then collect them into a vector.
.map(|matches| matches.into_iter().map(|(_, v)| v).collect())
.map_err(errors::on_demand_cancel)
});
let maybe_future = fetcher.sync.with_context(move |ctx| {
// find all headers which match the filter, and fetch the receipts for each one.
// match them with their numbers for easy sorting later.
let bit_combos = filter.bloom_possibilities();
let receipts_futures: Vec<_> = headers.drain(..)
.filter(|ref hdr| {
let hdr_bloom = hdr.log_bloom();
bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom))
})
.map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into())))
.map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, hash, x)))
.collect();
match maybe_future {
Some(fut) => Box::new(fut),
None => Box::new(future::err(errors::network_disabled())),
}
// as the receipts come in, find logs within them which match the filter.
// insert them into a BTreeMap to maintain order by number and block index.
stream::futures_unordered(receipts_futures)
.fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| {
let mut block_index = 0;
for (transaction_index, receipt) in receipts.into_iter().enumerate() {
for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() {
if filter.matches(&log) {
matches.insert((num, block_index), Log {
address: log.address.into(),
topics: log.topics.into_iter().map(Into::into).collect(),
data: log.data.into(),
block_hash: Some(hash.into()),
block_number: Some(num.into()),
// No way to easily retrieve the transaction hash, so just skip it.
transaction_hash: None,
transaction_index: Some(transaction_index.into()),
log_index: Some(block_index.into()),
transaction_log_index: Some(transaction_log_index.into()),
log_type: "mined".into(),
removed: false,
});
}
block_index += 1;
}
}
future::ok(matches)
}) // and then collect them into a vector.
.map(|matches| matches.into_iter().map(|(_, v)| v).collect())
.map_err(errors::on_demand_cancel)
});
match maybe_future {
Some(fut) => Either::B(Either::A(fut)),
None => Either::B(Either::B(future::err(errors::network_disabled()))),
}
})
}
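// A minimal usage sketch (hypothetical caller; assumes `EthcoreFilter`
// can be built with just block bounds, which may require filling the
// remaining filter fields in practice):
//
//     let logs_future = fetcher.logs(EthcoreFilter {
//         from_block: BlockId::Number(1_000_000),
//         to_block: BlockId::Number(1_000_999), // 1000 blocks, within MAX_BLOCK_RANGE
//         ..Default::default()
//     });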
// Get a transaction by hash. Also returns its index within the block.
// Only returns transactions in the canonical chain.
pub fn transaction_by_hash(&self, tx_hash: H256, eip86_transition: u64)
-> BoxFuture<Option<(Transaction, usize)>>
-> impl Future<Item = Option<(Transaction, usize)>, Error = Error> + Send
{
let params = (self.sync.clone(), self.on_demand.clone());
let fetcher: Self = self.clone();
@@ -425,7 +423,7 @@ impl LightFetch {
}))
}
fn send_requests<T, F>(&self, reqs: Vec<OnDemandRequest>, parse_response: F) -> BoxFuture<T> where
fn send_requests<T, F>(&self, reqs: Vec<OnDemandRequest>, parse_response: F) -> impl Future<Item = T, Error = Error> + Send where
F: FnOnce(Vec<OnDemandResponse>) -> T + Send + 'static,
T: Send + 'static,
{
@@ -438,9 +436,153 @@ impl LightFetch {
match maybe_future {
Some(recv) => recv,
None => Box::new(future::err(errors::network_disabled()))
None => Box::new(future::err(errors::network_disabled())) as Box<Future<Item = _, Error = _> + Send>
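// with the function now returning `impl Future`, the explicit cast is
// what lets both match arms unify to one concrete (boxed) type.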
}
}
fn headers_range_by_block_id(
&self,
from_block: BlockId,
to_block: BlockId,
max: u64
) -> impl Future<Item = Vec<encoded::Header>, Error = Error> {
let fetch_hashes = [from_block, to_block].iter()
.filter_map(|block_id| match block_id {
BlockId::Hash(hash) => Some(hash.clone()),
_ => None,
})
.collect::<Vec<_>>();
let best_number = self.client.chain_info().best_block_number;
let fetcher = self.clone();
self.headers_by_hash(&fetch_hashes[..]).and_then(move |mut header_map| {
let (from_block_num, to_block_num) = {
let block_number = |id| match id {
&BlockId::Earliest => 0,
&BlockId::Latest => best_number,
&BlockId::Hash(ref h) =>
header_map.get(h).map(|hdr| hdr.number())
.expect("from_block and to_block headers are fetched by hash; this closure is only called on from_block and to_block; qed"),
&BlockId::Number(x) => x,
};
(block_number(&from_block), block_number(&to_block))
};
if to_block_num < from_block_num {
// early exit for "to" block before "from" block.
return Either::A(future::err(errors::filter_block_not_found(to_block)));
} else if to_block_num - from_block_num >= max {
return Either::A(future::err(errors::request_rejected_param_limit(max, "blocks")));
}
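// note the off-by-one: `to - from >= max` rejects the range, so at most
// `max` blocks (`to - from == max - 1`, inclusive of both ends) are
// served per request.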
let to_header_hint = match to_block {
BlockId::Hash(ref h) => header_map.remove(h),
_ => None,
};
let headers_fut = fetcher.headers_range(from_block_num, to_block_num, to_header_hint);
Either::B(headers_fut.map(move |headers| {
// Validate `from_block` when it was given as a hash: `headers` runs
// newest-to-oldest, so the last entry should be `from_block` itself;
// a mismatch means the requested hash is not an ancestor of `to_block`.
let last_hash = headers.last().map(|hdr| hdr.hash());
match (last_hash, from_block) {
(Some(h1), BlockId::Hash(h2)) if h1 != h2 => Vec::new(),
_ => headers,
}
}))
})
}
fn headers_by_hash(&self, hashes: &[H256]) -> impl Future<Item = H256FastMap<encoded::Header>, Error = Error> {
let mut refs = H256FastMap::with_capacity_and_hasher(hashes.len(), Default::default());
let mut reqs = Vec::with_capacity(hashes.len());
for hash in hashes {
refs.entry(*hash).or_insert_with(|| {
self.make_header_requests(BlockId::Hash(*hash), &mut reqs)
.expect("make_header_requests never fails for BlockId::Hash; qed")
});
}
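// (`entry().or_insert_with()` deduplicates: when `from_block` and
// `to_block` name the same hash, only one header request is issued.)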
self.send_requests(reqs, move |res| {
let headers = refs.drain()
.map(|(hash, header_ref)| {
let hdr = extract_header(&res, header_ref)
.expect("these responses correspond to requests that header_ref belongs to; \
qed");
(hash, hdr)
})
.collect();
headers
})
}
fn headers_range(
&self,
from_number: u64,
to_number: u64,
to_header_hint: Option<encoded::Header>
) -> impl Future<Item = Vec<encoded::Header>, Error = Error> {
let range_length = (to_number - from_number + 1) as usize;
let mut headers: Vec<encoded::Header> = Vec::with_capacity(range_length);
let iter_start = match to_header_hint {
Some(hdr) => {
let block_id = BlockId::Hash(hdr.parent_hash());
headers.push(hdr);
block_id
}
None => BlockId::Number(to_number),
};
headers.extend(self.client.ancestry_iter(iter_start)
.take_while(|hdr| hdr.number() >= from_number));
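// seed the result with whatever the local store already has, newest
// first; the loop below fetches the remainder from peers, walking back
// through parent hashes.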
let fetcher = self.clone();
future::loop_fn(headers, move |mut headers| {
let remaining = range_length - headers.len();
if remaining == 0 {
return Either::A(future::ok(future::Loop::Break(headers)));
}
let mut reqs: Vec<request::Request> = Vec::with_capacity(2);
let start_hash = if let Some(hdr) = headers.last() {
hdr.parent_hash().into()
} else {
let cht_root = cht::block_to_cht_number(to_number)
.and_then(|cht_num| fetcher.client.cht_root(cht_num as usize));
let cht_root = match cht_root {
Some(cht_root) => cht_root,
None => return Either::A(future::err(errors::unknown_block())),
};
let header_proof = request::HeaderProof::new(to_number, cht_root)
.expect("HeaderProof::new is Some(_) if cht::block_to_cht_number() is Some(_); \
this would return above if block_to_cht_number returned None; qed");
let idx = reqs.len();
let hash_ref = Field::back_ref(idx, 0);
reqs.push(header_proof.into());
hash_ref
};
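// `start_hash` is either the parent of the oldest header collected so
// far, or a back-reference into the `HeaderProof` response: the CHT
// (Canonical Hash Trie) proof resolves `to_number` to its canonical
// hash, which the `HeaderWithAncestors` request below then consumes.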
let max = cmp::min(remaining as u64, MAX_HEADERS_PER_REQUEST);
reqs.push(request::HeaderWithAncestors {
block_hash: start_hash,
ancestor_count: max - 1,
}.into());
Either::B(fetcher.send_requests(reqs, |mut res| {
match res.last_mut() {
Some(&mut OnDemandResponse::HeaderWithAncestors(ref mut res_headers)) =>
headers.extend(res_headers.drain(..)),
_ => panic!("reqs has at least one entry; each request maps to a response; qed"),
};
future::Loop::Continue(headers)
}))
})
}
}
#[derive(Clone)]
@@ -456,7 +598,7 @@ struct ExecuteParams {
// has a peer execute the transaction with given params. If `gas_known` is false,
// this will double the gas on each `OutOfGas` error.
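// (e.g. a call that actually needs ~60k gas but is submitted with 21k
// retries at 42k and then 84k, succeeding on the third attempt.)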
fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResult> {
fn execute_tx(gas_known: bool, params: ExecuteParams) -> impl Future<Item = ExecutionResult, Error = Error> + Send {
if !gas_known {
Box::new(future::loop_fn(params, |mut params| {
execute_tx(true, params.clone()).and_then(move |res| {
@@ -479,7 +621,7 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResu
failed => Ok(future::Loop::Break(failed)),
}
})
}))
})) as Box<Future<Item = _, Error = _> + Send>
} else {
trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand",
params.tx.gas);
@@ -500,8 +642,8 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResu
});
match proved_future {
Some(fut) => Box::new(fut),
None => Box::new(future::err(errors::network_disabled())),
Some(fut) => Box::new(fut) as Box<Future<Item = _, Error = _> + Send>,
None => Box::new(future::err(errors::network_disabled())) as Box<Future<Item = _, Error = _> + Send>,
}
}
}


@@ -24,10 +24,9 @@ use rlp::{self, Rlp};
use ethereum_types::{U256, H64, H160, H256, Address};
use parking_lot::Mutex;
use ethash::SeedHashCompute;
use ethash::{self, SeedHashCompute};
use ethcore::account_provider::{AccountProvider, DappId};
use ethcore::client::{BlockChainClient, BlockId, TransactionId, UncleId, StateOrBlock, StateClient, StateInfo, Call, EngineInfo};
use ethcore::ethereum::Ethash;
use ethcore::filter::Filter as EthcoreFilter;
use ethcore::header::{BlockNumber as EthBlockNumber};
use ethcore::log_entry::LogEntry;
@@ -740,9 +739,11 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> Eth for EthClient<
// check if we're still syncing and return empty strings in that case
{
//TODO: check if initial sync is complete here
//let sync = self.sync;
if /*sync.status().state != SyncState::Idle ||*/ self.client.queue_info().total_queue_size() > MAX_QUEUE_SIZE_TO_MINE_ON {
let sync_status = self.sync.status();
let queue_info = self.client.queue_info();
let total_queue_size = queue_info.total_queue_size();
if is_major_importing(Some(sync_status.state), queue_info) || total_queue_size > MAX_QUEUE_SIZE_TO_MINE_ON {
trace!(target: "miner", "Syncing. Cannot give any work.");
return Err(errors::no_work());
}
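// (per #9193: no work is handed out while the node is still bulk-
// importing blocks or the verification queue is backed up, since any
// proof-of-work found against stale state would be wasted.)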
@@ -765,7 +766,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> Eth for EthClient<
})?;
let (pow_hash, number, timestamp, difficulty) = work;
let target = Ethash::difficulty_to_boundary(&difficulty);
let target = ethash::difficulty_to_boundary(&difficulty);
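// boundary = 2^256 / difficulty; the helper moved into the `ethash`
// crate (#9328) so PoW validation and the RPC share one implementation.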
let seed_hash = self.seed_compute.lock().hash_block_number(number);
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();


@@ -199,7 +199,7 @@ impl LightClient for LightFetch {
}
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
LightFetch::logs(self, filter)
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
}
}


@@ -539,7 +539,7 @@ impl<T: LightChainClient + 'static> Filterable for EthClient<T> {
}
fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>> {
self.fetcher().logs(filter)
Box::new(self.fetcher().logs(filter)) as BoxFuture<_>
}
fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec<Log> {


@@ -32,7 +32,7 @@ impl IoHandler<NetworkIoMessage> for HostHandler {
if let NetworkIoMessage::NetworkStarted(ref public_url) = *message {
let mut url = self.public_url.write();
if url.as_ref().map_or(true, |uref| uref != public_url) {
info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(public_url.as_ref()));
info!(target: "network", "Public node URL: {}", Colour::White.bold().paint(AsRef::<str>::as_ref(public_url)));
}
*url = Some(public_url.to_owned());
}


@@ -3,7 +3,7 @@
[package]
name = "parity-version"
# NOTE: this value is used for Parity version string (via env CARGO_PKG_VERSION)
version = "1.11.8"
version = "1.11.10"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"