Backports for Stable 2.2.10 (#10332)

* version: bump stable to 2.2.10

* import rpc transactions sequentially (#10051)

* import rpc transactions sequentially

* use impl trait in argument position, renamed ProspectiveDispatcher to WithPostSign

* grouped imports

* integrates PostSign with ProspectiveSigner

* fix spaces, removed unnecessary type cast and duplicate polling

* clean up code style

* Apply suggestions from code review

* Additional tests for uint deserialization. (#10279)

* Don't run the CPP example on CI (#10285)

* Don't run the CPP example on CI

* Add comment

* CI optimizations (#10297)

* CI optimizations

* fix stripping

* new dockerfile

* no need for submodule update

* review

* moved dockerfile

* it becomes large

* onchain update depends on s3

* fix dependency

* fix cache status

* fix cache status

* new cache status

* fix publish job (#10317)

* fix publish job

* dashes and colonels

* Add Statetest support for Constantinople Fix (#10323)

* Update Ethereum tests repo to v6.0.0-beta.3 tag

* Add spec for St.Peter's / ConstantinopleFix statetests

* fix(add helper for timestamp overflows) (#10330)

* fix(add helper timestamp overflows)

* fix(simplify code)

* fix(make helper private)

* fix(docker): fix parity not receiving SIGINT (#10059)

* fix(docker): fix parity not receiving SIGINT

* fix: update with reviews

* update with review

* update

* update

* snap: official image / test (#10168)

* official image / test

* fix / test

* bit more necromancy

* fix paths

* add source bin/df /test

* add source bin/df /test2

* something w paths /test

* something w paths /test

* add source-type /test

* show paths /test

* copy plugin /test

* plugin -> nil

* install rhash

* no questions while installing rhash

* publish snap only for release

* Don't add discovery initiators to the node table (#10305)

* Don't add discovery initiators to the node table

* Use enums for tracking state of the nodes in discovery

* Don't try to ping ourselves

* Fix minor nits

* Update timeouts when observing an outdated node

* Extracted update_bucket_record from update_node

* Fixed typo

* Fix two final nits from @todr

* change docker image to be based on debian instead of ubuntu due to changes in the build container (#10336)

* change docker image to be based on debian instead of ubuntu due to changes in the build container

* roll back docker build image and docker deploy image to ubuntu:xenial base (#10338)

* perform stripping during build (#10208)

* perform stripping during build

* var RUSTFLAGS

* fix(docker-aarch64): cross-compile config (#9798)

* ci: remove trailing double newline from dockerfile
Afri Schoedon, 2019-02-13 11:00:15 +01:00, committed by GitHub
parent cc91e95f03
commit 7b1d3e180c
26 changed files with 510 additions and 254 deletions


@ -2,11 +2,14 @@ stages:
- test
- build
- publish
- publish-onchain
- optional
image: parity/rust:gitlab-ci
variables:
GIT_STRATEGY: fetch
GIT_SUBMODULE_STRATEGY: recursive
CI_SERVER_NAME: "GitLab CI"
CARGO_HOME: "${CI_PROJECT_DIR}/.cargo"
CARGO_TARGET: x86_64-unknown-linux-gnu
@ -40,25 +43,29 @@ test-linux:
variables:
RUN_TESTS: all
script:
- scripts/gitlab/test-all.sh stable
- scripts/gitlab/test-all.sh
- sccache -s
tags:
- rust-stable
- linux-docker
test-audit:
stage: test
script:
- scripts/gitlab/cargo-audit.sh
- set -e
- set -u
- cargo audit
tags:
- rust-stable
- linux-docker
build-linux:
stage: build
only: *releaseable_branches
script:
- scripts/gitlab/build-unix.sh
- sccache -s
<<: *collect_artifacts
tags:
- rust-stable
- linux-docker
build-darwin:
stage: build
@ -96,9 +103,9 @@ publish-docker:
- scripts/gitlab/publish-docker.sh parity
publish-snap:
stage: publish
stage: optional #publish
only: *releaseable_branches
image: parity/snapcraft:gitlab-ci
image: snapcore/snapcraft
variables:
BUILD_ARCH: amd64
cache: {}
@ -112,19 +119,66 @@ publish-snap:
allow_failure: true
<<: *collect_artifacts
publish-awss3:
stage: publish
publish-onnet-update:
stage: publish-onchain
only: *releaseable_branches
cache: {}
cache: {}
dependencies:
- build-linux
- build-darwin
- build-windows
- publish-awss3-release
before_script: *determine_version
script:
- scripts/gitlab/publish-awss3.sh
- scripts/gitlab/publish-onnet-update.sh
tags:
- shell
- linux-docker
# configures aws for fast uploads/syncs
.s3-before-script: &s3-before-script
before_script:
- mkdir -p ${HOME}/.aws
- |
cat > ${HOME}/.aws/config <<EOC
[default]
s3 =
max_concurrent_requests = 20
max_queue_size = 10000
multipart_threshold = 64MB
multipart_chunksize = 16MB
max_bandwidth = 50MB/s
use_accelerate_endpoint = false
addressing_style = path
EOC
publish-awss3-release:
image: parity/awscli:latest
stage: publish
only: *releaseable_branches
cache: {}
dependencies:
- build-linux
- build-darwin
- build-windows
variables:
GIT_STRATEGY: none
<<: *s3-before-script
script:
- echo "__________Push binaries to AWS S3____________"
- case "${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}" in
(beta|stable|nightly)
export BUCKET=releases.parity.io/ethereum;
;;
(*)
export BUCKET=builds-parity;
;;
esac
- aws s3 sync ./artifacts s3://${BUCKET}/${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}/
after_script:
- aws s3 ls s3://${BUCKET}/${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}/
--recursive --human-readable --summarize
tags:
- linux-docker
publish-docs:
stage: publish
@ -150,22 +204,3 @@ build-android:
allow_failure: true
<<: *collect_artifacts
test-beta:
stage: optional
variables:
RUN_TESTS: cargo
script:
- scripts/gitlab/test-all.sh beta
tags:
- rust-beta
allow_failure: true
test-nightly:
stage: optional
variables:
RUN_TESTS: all
script:
- scripts/gitlab/test-all.sh nightly
tags:
- rust-nightly
allow_failure: true
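Note on the CI restructuring above: the old publish-awss3 job is split in two. publish-awss3-release uploads the binaries to S3 (using the .aws/config written by the shared s3-before-script anchor), while the new publish-onnet-update job sits in a dedicated publish-onchain stage and lists publish-awss3-release in its dependencies, so the on-chain update registration only runs once the binaries are already on S3.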

Cargo.lock

@ -818,6 +818,7 @@ dependencies = [
"keccak-hash 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-crypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2235,7 +2236,7 @@ version = "1.12.0"
dependencies = [
"jni 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"panic_hook 0.1.0",
"parity-ethereum 2.2.9",
"parity-ethereum 2.2.10",
]
[[package]]
@ -2251,7 +2252,7 @@ dependencies = [
[[package]]
name = "parity-ethereum"
version = "2.2.9"
version = "2.2.10"
dependencies = [
"ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2300,7 +2301,7 @@ dependencies = [
"parity-rpc-client 1.4.0",
"parity-runtime 0.1.0",
"parity-updater 1.12.0",
"parity-version 2.2.9",
"parity-version 2.2.10",
"parity-whisper 0.1.0",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2455,7 +2456,7 @@ dependencies = [
"parity-crypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-runtime 0.1.0",
"parity-updater 1.12.0",
"parity-version 2.2.9",
"parity-version 2.2.10",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"patricia-trie 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2552,7 +2553,7 @@ dependencies = [
"parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-hash-fetch 1.12.0",
"parity-path 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-version 2.2.9",
"parity-version 2.2.10",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2562,7 +2563,7 @@ dependencies = [
[[package]]
name = "parity-version"
version = "2.2.9"
version = "2.2.10"
dependencies = [
"parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",


@ -2,7 +2,7 @@
description = "Parity Ethereum client"
name = "parity-ethereum"
# NOTE Make sure to update util/version/Cargo.toml as well
version = "2.2.9"
version = "2.2.10"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]


@ -0,0 +1,65 @@
{
"name": "St. Peter's (test)",
"engine": {
"Ethash": {
"params": {
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x1BC16D674EC80000",
"homesteadTransition": "0x0",
"eip100bTransition": "0x0",
"difficultyBombDelays": {
"0": 5000000
}
}
}
},
"params": {
"gasLimitBoundDivisor": "0x0400",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x1",
"maxCodeSize": 24576,
"maxCodeSizeTransition": "0x0",
"eip150Transition": "0x0",
"eip160Transition": "0x0",
"eip161abcTransition": "0x0",
"eip161dTransition": "0x0",
"eip140Transition": "0x0",
"eip211Transition": "0x0",
"eip214Transition": "0x0",
"eip155Transition": "0x0",
"eip658Transition": "0x0",
"eip145Transition": "0x0",
"eip1014Transition": "0x0",
"eip1052Transition": "0x0",
"eip1283DisableTransition": "0x0"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x1388"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": "0x00", "pricing": { "modexp": { "divisor": 20 } } } },
"0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": "0x00", "pricing": { "linear": { "base": 500, "word": 0 } } } },
"0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": "0x00", "pricing": { "linear": { "base": 40000, "word": 0 } } } },
"0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": "0x00", "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } }
}
}
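Note: relative to the Constantinople test spec, the defining change in this St. Peter's spec is `"eip1283DisableTransition": "0x0"`: EIP-1283 is disabled from genesis, which is what distinguishes ConstantinopleFix from Constantinople.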

@ -1 +1 @@
Subproject commit 420f443477caa8516f1f9ee8122fafc3415c0f34
Subproject commit 725dbc73a54649e22a00330bd0f4d6699a5060e5


@ -87,6 +87,7 @@ impl<'a> EvmTestClient<'a> {
ForkSpec::EIP158 => Some(ethereum::new_eip161_test()),
ForkSpec::Byzantium => Some(ethereum::new_byzantium_test()),
ForkSpec::Constantinople => Some(ethereum::new_constantinople_test()),
ForkSpec::ConstantinopleFix => Some(ethereum::new_constantinople_fix_test()),
ForkSpec::EIP158ToByzantiumAt5 => Some(ethereum::new_transition_test()),
ForkSpec::FrontierToHomesteadAt5 | ForkSpec::HomesteadToDaoAt5 | ForkSpec::HomesteadToEIP150At5 => None,
}


@ -89,6 +89,8 @@ pub enum BlockError {
InvalidNumber(Mismatch<BlockNumber>),
/// Block number isn't sensible.
RidiculousNumber(OutOfBounds<BlockNumber>),
/// Header timestamp overflowed
TimestampOverflow,
/// Too many transactions from a particular address.
TooManyTransactions(Address),
/// Parent given is unknown.
@ -138,6 +140,7 @@ impl fmt::Display for BlockError {
UnknownParent(ref hash) => format!("Unknown parent: {}", hash),
UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash),
UnknownEpochTransition(ref num) => format!("Unknown transition to epoch number: {}", num),
TimestampOverflow => format!("Timestamp overflow"),
TooManyTransactions(ref address) => format!("Too many transactions from: {}", address),
};


@ -143,6 +143,9 @@ pub fn new_byzantium_test() -> Spec { load(None, include_bytes!("../../res/ether
/// Create a new Foundation Constantinople era spec.
pub fn new_constantinople_test() -> Spec { load(None, include_bytes!("../../res/ethereum/constantinople_test.json")) }
/// Create a new Foundation St. Peter's (Constantinople Fix) era spec.
pub fn new_constantinople_fix_test() -> Spec { load(None, include_bytes!("../../res/ethereum/st_peters_test.json")) }
/// Create a new Musicoin-MCIP3-era spec.
pub fn new_mcip3_test() -> Spec { load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) }
@ -163,6 +166,9 @@ pub fn new_byzantium_test_machine() -> EthereumMachine { load_machine(include_by
/// Create a new Foundation Constantinople era spec.
pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/constantinople_test.json")) }
/// Create a new Foundation St. Peter's (Constantinople Fix) era spec.
pub fn new_constantinople_fix_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/st_peters_test.json")) }
/// Create a new Musicoin-MCIP3-era spec.
pub fn new_mcip3_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) }


@ -165,6 +165,7 @@ mod state_tests {
declare_test!{GeneralStateTest_stRefundTest, "GeneralStateTests/stRefundTest/"}
declare_test!{GeneralStateTest_stReturnDataTest, "GeneralStateTests/stReturnDataTest/"}
declare_test!{GeneralStateTest_stRevertTest, "GeneralStateTests/stRevertTest/"}
declare_test!{GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"}
declare_test!{GeneralStateTest_stShift, "GeneralStateTests/stShift/"}
declare_test!{GeneralStateTest_stSolidityTest, "GeneralStateTests/stSolidityTest/"}
declare_test!{GeneralStateTest_stSpecialTest, "GeneralStateTests/stSpecialTest/"}
@ -177,7 +178,6 @@ mod state_tests {
declare_test!{GeneralStateTest_stZeroCallsRevert, "GeneralStateTests/stZeroCallsRevert/"}
declare_test!{GeneralStateTest_stZeroCallsTest, "GeneralStateTests/stZeroCallsTest/"}
declare_test!{GeneralStateTest_stZeroKnowledge, "GeneralStateTests/stZeroKnowledge/"}
declare_test!{GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"}
// Attempts to send a transaction that requires more than current balance:
// Tx:


@ -39,6 +39,25 @@ use header::{BlockNumber, Header};
use transaction::SignedTransaction;
use verification::queue::kind::blocks::Unverified;
/// Returns `Ok(SystemTime)` when the result is less than or equal to `i32::max_value()`, to prevent
/// `SystemTime` from panicking: its internal representation is platform specific and may be i32 or i64.
///
/// Returns `Err(BlockError::TimestampOverflow)` otherwise.
///
// FIXME: @niklasad1 - remove this and use `SystemTime::checked_add`
// when https://github.com/rust-lang/rust/issues/55940 is stabilized.
fn timestamp_checked_add(sys: SystemTime, d2: Duration) -> Result<SystemTime, BlockError> {
let d1 = sys.duration_since(UNIX_EPOCH).map_err(|_| BlockError::TimestampOverflow)?;
let total_time = d1.checked_add(d2).ok_or(BlockError::TimestampOverflow)?;
if total_time.as_secs() <= i32::max_value() as u64 {
Ok(sys + d2)
} else {
Err(BlockError::TimestampOverflow)
}
}
/// Preprocessed block data gathered in `verify_block_unordered` call
pub struct PreverifiedBlock {
/// Populated block header
@ -305,7 +324,7 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool,
const ACCEPTABLE_DRIFT: Duration = Duration::from_secs(15);
let max_time = SystemTime::now() + ACCEPTABLE_DRIFT;
let invalid_threshold = max_time + ACCEPTABLE_DRIFT * 9;
let timestamp = UNIX_EPOCH + Duration::from_secs(header.timestamp());
let timestamp = timestamp_checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp()))?;
if timestamp > invalid_threshold {
return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: Some(max_time), min: None, found: timestamp })))
@ -327,8 +346,8 @@ fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result
let gas_limit_divisor = engine.params().gas_limit_bound_divisor;
if !engine.is_timestamp_valid(header.timestamp(), parent.timestamp()) {
let min = SystemTime::now() + Duration::from_secs(parent.timestamp() + 1);
let found = SystemTime::now() + Duration::from_secs(header.timestamp());
let min = timestamp_checked_add(SystemTime::now(), Duration::from_secs(parent.timestamp().saturating_add(1)))?;
let found = timestamp_checked_add(SystemTime::now(), Duration::from_secs(header.timestamp()))?;
return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(min), found })))
}
if header.number() != parent.number() + 1 {
@ -734,7 +753,8 @@ mod tests {
check_fail_timestamp(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), false);
header = good.clone();
header.set_timestamp(2450000000);
// will return `BlockError::TimestampOverflow` when timestamp > `i32::max_value()`
header.set_timestamp(i32::max_value() as u64);
check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false);
header = good.clone();
@ -806,4 +826,11 @@ mod tests {
check_fail(unordered_test(&create_test_block_with_data(&header, &bad_transactions, &[]), &engine), TooManyTransactions(keypair.address()));
unordered_test(&create_test_block_with_data(&header, &good_transactions, &[]), &engine).unwrap();
}
#[test]
fn checked_add_systime_dur() {
assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64 + 1, 0)).is_err());
assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64, 0)).is_ok());
assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64 - 1, 1_000_000_000)).is_ok());
}
}
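To illustrate the bound exercised by this test, a minimal standalone sketch (standard library only, not part of the diff):

use std::time::{Duration, UNIX_EPOCH};

fn main() {
    // i32::MAX seconds past the epoch is representable on every platform.
    let ok = UNIX_EPOCH + Duration::from_secs(i32::max_value() as u64);
    println!("{:?}", ok);
    // One more second is exactly what `timestamp_checked_add` rejects with
    // `BlockError::TimestampOverflow`, instead of risking a panic inside
    // `SystemTime` arithmetic on targets with a 32-bit time representation.
}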


@ -30,6 +30,7 @@ pub enum ForkSpec {
Homestead,
Byzantium,
Constantinople,
ConstantinopleFix,
EIP158ToByzantiumAt5,
FrontierToHomesteadAt5,
HomesteadToDaoAt5,


@ -41,11 +41,11 @@ use sync::LightSync;
use transaction::{Action, SignedTransaction, PendingTransaction, Transaction, Error as TransactionError};
use jsonrpc_core::{BoxFuture, Result, Error};
use jsonrpc_core::futures::{future, Future, Poll, Async};
use jsonrpc_core::futures::{future, Future, Poll, Async, IntoFuture};
use jsonrpc_core::futures::future::Either;
use v1::helpers::{errors, nonce, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
H520 as RpcH520, Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
@ -69,12 +69,20 @@ pub trait Dispatcher: Send + Sync + Clone {
fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool)
-> BoxFuture<FilledTransactionRequest>;
/// Sign the given transaction request without dispatching, fetching appropriate nonce.
fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith)
-> BoxFuture<WithToken<SignedTransaction>>;
/// Sign the given transaction request, fetching appropriate nonce and executing the PostSign action
fn sign<P>(
&self,
accounts: Arc<AccountProvider>,
filled: FilledTransactionRequest,
password: SignWith,
post_sign: P
) -> BoxFuture<P::Item>
where
P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send;
/// Converts a `SignedTransaction` into `RichRawTransaction`
fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction;
fn enrich(&self, signed: SignedTransaction) -> RpcRichRawTransaction;
/// "Dispatch" a local transaction.
fn dispatch_transaction(&self, signed_transaction: PendingTransaction)
@ -164,19 +172,30 @@ impl<C: miner::BlockChainClient + BlockChainClient, M: MinerService> Dispatcher
}))
}
fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith)
-> BoxFuture<WithToken<SignedTransaction>>
fn sign<P>(
&self,
accounts: Arc<AccountProvider>,
filled: FilledTransactionRequest,
password: SignWith,
post_sign: P
) -> BoxFuture<P::Item>
where
P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send
{
let chain_id = self.client.signing_chain_id();
if let Some(nonce) = filled.nonce {
return Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password)));
let future = sign_transaction(&*accounts, filled, chain_id, nonce, password)
.into_future()
.and_then(move |signed| post_sign.execute(signed));
Box::new(future)
} else {
let state = self.state_nonce(&filled.from);
let reserved = self.nonces.lock().reserve(filled.from, state);
Box::new(ProspectiveSigner::new(accounts, filled, chain_id, reserved, password, post_sign))
}
let state = self.state_nonce(&filled.from);
let reserved = self.nonces.lock().reserve(filled.from, state);
Box::new(ProspectiveSigner::new(accounts, filled, chain_id, reserved, password))
}
fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction {
@ -396,12 +415,24 @@ impl Dispatcher for LightDispatcher {
}))
}
fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith)
-> BoxFuture<WithToken<SignedTransaction>>
fn sign<P>(
&self,
accounts: Arc<AccountProvider>,
filled: FilledTransactionRequest,
password: SignWith,
post_sign: P
) -> BoxFuture<P::Item>
where
P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send
{
let chain_id = self.client.signing_chain_id();
let nonce = filled.nonce.expect("nonce is always provided; qed");
Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password)))
let future = sign_transaction(&*accounts, filled, chain_id, nonce, password)
.into_future()
.and_then(move |signed| post_sign.execute(signed));
Box::new(future)
}
fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction {
@ -449,28 +480,60 @@ fn sign_transaction(
#[derive(Debug, Clone, Copy)]
enum ProspectiveSignerState {
TryProspectiveSign,
WaitForPostSign,
WaitForNonce,
Finish,
}
struct ProspectiveSigner {
struct ProspectiveSigner<P: PostSign> {
accounts: Arc<AccountProvider>,
filled: FilledTransactionRequest,
chain_id: Option<u64>,
reserved: nonce::Reserved,
password: SignWith,
state: ProspectiveSignerState,
prospective: Option<Result<WithToken<SignedTransaction>>>,
prospective: Option<WithToken<SignedTransaction>>,
ready: Option<nonce::Ready>,
post_sign: Option<P>,
post_sign_future: Option<<P::Out as IntoFuture>::Future>
}
impl ProspectiveSigner {
/// Action to execute after signing,
/// e.g. importing a transaction into the chain
pub trait PostSign: Send {
/// Item that this post-sign action returns
type Item: Send;
/// In case you need to perform an async post-sign action
type Out: IntoFuture<Item = Self::Item, Error = Error> + Send;
/// Perform an action with the signed transaction
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out;
}
impl PostSign for () {
type Item = WithToken<SignedTransaction>;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
Ok(signed)
}
}
impl<F: Send, T: Send> PostSign for F
where F: FnOnce(WithToken<SignedTransaction>) -> Result<T>
{
type Item = T;
type Out = Result<Self::Item>;
fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out {
(self)(signed)
}
}
impl<P: PostSign> ProspectiveSigner<P> {
pub fn new(
accounts: Arc<AccountProvider>,
filled: FilledTransactionRequest,
chain_id: Option<u64>,
reserved: nonce::Reserved,
password: SignWith,
post_sign: P
) -> Self {
// If the account is permanently unlocked we can try to sign
// using prospective nonce. This should speed up sending
@ -491,6 +554,8 @@ impl ProspectiveSigner {
},
prospective: None,
ready: None,
post_sign: Some(post_sign),
post_sign_future: None
}
}
@ -509,8 +574,8 @@ impl ProspectiveSigner {
}
}
impl Future for ProspectiveSigner {
type Item = WithToken<SignedTransaction>;
impl<P: PostSign> Future for ProspectiveSigner<P> {
type Item = P::Item;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
@ -523,32 +588,45 @@ impl Future for ProspectiveSigner {
match self.poll_reserved()? {
Async::NotReady => {
self.state = WaitForNonce;
self.prospective = Some(self.sign(self.reserved.prospective_value()));
self.prospective = Some(self.sign(self.reserved.prospective_value())?);
},
Async::Ready(nonce) => {
self.state = Finish;
self.prospective = Some(self.sign(nonce.value()));
self.state = WaitForPostSign;
self.post_sign_future = Some(self.post_sign.take()
.expect("post_sign is set on creation; qed")
.execute(self.sign(nonce.value())?)
.into_future());
self.ready = Some(nonce);
},
}
},
WaitForNonce => {
let nonce = try_ready!(self.poll_reserved());
let result = match (self.prospective.take(), nonce.matches_prospective()) {
let prospective = match (self.prospective.take(), nonce.matches_prospective()) {
(Some(prospective), true) => prospective,
_ => self.sign(nonce.value()),
_ => self.sign(nonce.value())?,
};
self.state = Finish;
self.prospective = Some(result);
self.ready = Some(nonce);
self.state = WaitForPostSign;
self.post_sign_future = Some(self.post_sign.take()
.expect("post_sign is set on creation; qed")
.execute(prospective)
.into_future());
},
Finish => {
if let (Some(result), Some(nonce)) = (self.prospective.take(), self.ready.take()) {
// Mark nonce as used on successful signing
return result.map(move |tx| {
nonce.mark_used();
Async::Ready(tx)
})
WaitForPostSign => {
if let Some(mut fut) = self.post_sign_future.as_mut() {
match fut.poll()? {
Async::Ready(item) => {
let nonce = self.ready
.take()
.expect("nonce is set before state transitions to WaitForPostSign; qed");
nonce.mark_used();
return Ok(Async::Ready(item))
},
Async::NotReady => {
return Ok(Async::NotReady)
}
}
} else {
panic!("Poll after ready.");
}
@ -655,19 +733,21 @@ pub fn execute<D: Dispatcher + 'static>(
match payload {
ConfirmationPayload::SendTransaction(request) => {
let condition = request.condition.clone().map(Into::into);
Box::new(dispatcher.sign(accounts, request, pass)
.map(move |v| v.map(move |tx| PendingTransaction::new(tx, condition)))
.map(WithToken::into_tuple)
.map(|(tx, token)| (tx, token, dispatcher))
.and_then(|(tx, tok, dispatcher)| {
dispatcher.dispatch_transaction(tx)
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
.map(move |h| WithToken::from((h, tok)))
}))
let cloned_dispatcher = dispatcher.clone();
let post_sign = move |with_token_signed: WithToken<SignedTransaction>| {
let (signed, token) = with_token_signed.into_tuple();
let signed_transaction = PendingTransaction::new(signed, condition);
cloned_dispatcher.dispatch_transaction(signed_transaction)
.map(|hash| (hash, token))
};
let future = dispatcher.sign(accounts, request, pass, post_sign)
.map(|(hash, token)| {
WithToken::from((ConfirmationResponse::SendTransaction(hash.into()), token))
});
Box::new(future)
},
ConfirmationPayload::SignTransaction(request) => {
Box::new(dispatcher.sign(accounts, request, pass)
Box::new(dispatcher.sign(accounts, request, pass, ())
.map(move |result| result
.map(move |tx| dispatcher.enrich(tx))
.map(ConfirmationResponse::SignTransaction)
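To make the new control flow easier to follow, here is a minimal synchronous sketch of the `PostSign` idea (hypothetical `Signed` and `sign_with` names, plain `Result` instead of the `IntoFuture` used above):

// The signer runs a caller-supplied action right after signing, so the
// "just sign" and "sign then import" RPC paths share one code path.
struct Signed(String);

trait PostSign {
    type Item;
    fn execute(self, signed: Signed) -> Result<Self::Item, String>;
}

// `()` means "no post-sign action": hand the signed value straight back.
impl PostSign for () {
    type Item = Signed;
    fn execute(self, signed: Signed) -> Result<Signed, String> { Ok(signed) }
}

// Any `FnOnce` works as an ad-hoc post-sign action, mirroring the closure
// impl in the diff above.
impl<F, T> PostSign for F where F: FnOnce(Signed) -> Result<T, String> {
    type Item = T;
    fn execute(self, signed: Signed) -> Result<T, String> { (self)(signed) }
}

fn sign_with<P: PostSign>(payload: &str, post_sign: P) -> Result<P::Item, String> {
    let signed = Signed(format!("signed({})", payload));
    post_sign.execute(signed)
}

fn main() {
    let signed = sign_with("tx", ()).unwrap();
    let imported = sign_with("tx", |s: Signed| -> Result<String, String> {
        Ok(format!("imported {}", s.0)) // e.g. dispatch into the tx pool
    }).unwrap();
    println!("{} / {}", signed.0, imported);
}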


@ -18,7 +18,7 @@
use std::sync::Arc;
use std::time::Duration;
use bytes::{Bytes, ToPretty};
use bytes::Bytes;
use ethcore::account_provider::AccountProvider;
use transaction::PendingTransaction;
use ethereum_types::{H520, U128, Address};
@ -27,7 +27,7 @@ use ethkey::{public_to_address, recover, Signature};
use jsonrpc_core::{BoxFuture, Result};
use jsonrpc_core::futures::{future, Future};
use v1::helpers::{errors, eip191};
use v1::helpers::dispatch::{self, eth_data_hash, Dispatcher, SignWith};
use v1::helpers::dispatch::{self, eth_data_hash, Dispatcher, SignWith, PostSign, WithToken};
use v1::traits::Personal;
use v1::types::{
H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U128 as RpcU128,
@ -41,6 +41,7 @@ use v1::types::{
use v1::metadata::Metadata;
use eip712::{EIP712, hash_structured_data};
use jsonrpc_core::types::Value;
use transaction::SignedTransaction;
/// Account management (personal) rpc implementation.
pub struct PersonalClient<D: Dispatcher> {
@ -68,7 +69,16 @@ impl<D: Dispatcher> PersonalClient<D> {
}
impl<D: Dispatcher + 'static> PersonalClient<D> {
fn do_sign_transaction(&self, _meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<(PendingTransaction, D)> {
fn do_sign_transaction<P>(
&self,
_meta: Metadata,
request: TransactionRequest,
password: String,
post_sign: P
) -> BoxFuture<P::Item>
where P: PostSign + 'static,
<P::Out as futures::future::IntoFuture>::Future: Send
{
let dispatcher = self.dispatcher.clone();
let accounts = self.accounts.clone();
@ -86,11 +96,7 @@ impl<D: Dispatcher + 'static> PersonalClient<D> {
Box::new(dispatcher.fill_optional_fields(request.into(), default, false)
.and_then(move |filled| {
let condition = filled.condition.clone().map(Into::into);
dispatcher.sign(accounts, filled, SignWith::Password(password.into()))
.map(|tx| tx.into_value())
.map(move |tx| PendingTransaction::new(tx, condition))
.map(move |tx| (tx, dispatcher))
dispatcher.sign(accounts, filled, SignWith::Password(password.into()), post_sign)
})
)
}
@ -223,18 +229,26 @@ impl<D: Dispatcher + 'static> Personal for PersonalClient<D> {
}
fn sign_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<RpcRichRawTransaction> {
Box::new(self.do_sign_transaction(meta, request, password)
.map(|(pending_tx, dispatcher)| dispatcher.enrich(pending_tx.transaction)))
let condition = request.condition.clone().map(Into::into);
let dispatcher = self.dispatcher.clone();
Box::new(self.do_sign_transaction(meta, request, password, ())
.map(move |tx| PendingTransaction::new(tx.into_value(), condition))
.map(move |pending_tx| dispatcher.enrich(pending_tx.transaction)))
}
fn send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<RpcH256> {
Box::new(self.do_sign_transaction(meta, request, password)
.and_then(|(pending_tx, dispatcher)| {
let chain_id = pending_tx.chain_id();
trace!(target: "miner", "send_transaction: dispatching tx: {} for chain ID {:?}",
::rlp::encode(&*pending_tx).pretty(), chain_id);
dispatcher.dispatch_transaction(pending_tx).map(Into::into)
let condition = request.condition.clone().map(Into::into);
let dispatcher = self.dispatcher.clone();
Box::new(self.do_sign_transaction(meta, request, password, move |signed: WithToken<SignedTransaction>| {
dispatcher.dispatch_transaction(
PendingTransaction::new(
signed.into_value(),
condition
)
)
})
.and_then(|hash| {
Ok(RpcH256::from(hash))
})
)
}


@ -1,8 +1,5 @@
FROM ubuntu:xenial
MAINTAINER Parity Technologies <devops@parity.io>
#set ENVIROMENT
ARG TARGET
ENV TARGET ${TARGET}
LABEL MAINTAINER="Parity Technologies <devops-team@parity.io>"
# install tools and dependencies
RUN apt update && apt install -y --no-install-recommends openssl libudev-dev file curl jq
@ -10,31 +7,25 @@ RUN apt update && apt install -y --no-install-recommends openssl libudev-dev fil
# show backtraces
ENV RUST_BACKTRACE 1
#cleanup Docker image
RUN apt autoremove -y
RUN apt clean -y
RUN rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/*
# cleanup Docker image
RUN apt autoremove -y \
&& apt clean -y \
&& rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/*
RUN groupadd -g 1000 parity \
&& useradd -m -u 1000 -g parity -s /bin/sh parity
WORKDIR /home/parity
ENV PATH "~/bin:${PATH}"
#add TARGET to docker image
COPY artifacts/x86_64-unknown-linux-gnu/$TARGET ./bin/$TARGET
# Build a shell script because the ENTRYPOINT command doesn't like using ENV
RUN echo "#!/bin/bash \n ${TARGET} \$@" > ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
# add parity-ethereum to docker image
COPY artifacts/x86_64-unknown-linux-gnu/parity /bin/parity
COPY scripts/docker/hub/check_sync.sh /check_sync.sh
# switch to user parity here
USER parity
# setup ENTRYPOINT
VOLUME [ "/home/parity/.local/share/io.parity.ethereum" ]
EXPOSE 5001 8080 8082 8083 8545 8546 8180 30303/tcp 30303/udp
ENTRYPOINT ["./entrypoint.sh"]
ENTRYPOINT ["/bin/parity"]


@ -1,14 +1,30 @@
FROM ubuntu:14.04
FROM ubuntu:xenial
WORKDIR /build
# install aarch64(armv8) dependencies and tools
RUN dpkg --add-architecture arm64
RUN echo '# source urls for arm64 \n\
deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial main \n\
deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial main \n\
deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-updates main \n\
deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-updates main \n\
deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-security main \n\
deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-security main \n # end arm64 section' >> /etc/apt/sources.list &&\
sed -r 's/deb h/deb \[arch=amd64\] h/g' /etc/apt/sources.list > /tmp/sources-tmp.list && \
cp /tmp/sources-tmp.list /etc/apt/sources.list&& \
sed -r 's/deb-src h/deb-src \[arch=amd64\] h/g' /etc/apt/sources.list > /tmp/sources-tmp.list&&cat /etc/apt/sources.list &&\
cp /tmp/sources-tmp.list /etc/apt/sources.list&& echo "next"&&cat /etc/apt/sources.list
# install tools and dependencies
RUN apt-get -y update && \
apt-get install -y --force-yes --no-install-recommends \
curl git make g++ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \
libc6-arm64-cross libc6-dev-arm64-cross wget file ca-certificates \
binutils-aarch64-linux-gnu cmake3 libudev-dev \
&& \
apt-get clean
apt-get upgrade -y && \
apt-get install -y --no-install-recommends \
curl make cmake file ca-certificates \
g++ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \
libc6-dev-arm64-cross binutils-aarch64-linux-gnu \
libudev-dev libudev-dev:arm64 \
&& \
apt-get clean
# install rustup
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
@ -30,14 +46,14 @@ RUN rustc -vV && cargo -V
# build parity
ADD . /build/parity
RUN cd parity && \
mkdir -p .cargo && \
echo '[target.aarch64-unknown-linux-gnu]\n\
linker = "aarch64-linux-gnu-gcc"\n'\
>>.cargo/config && \
cat .cargo/config && \
cargo build --target aarch64-unknown-linux-gnu --release --verbose && \
ls /build/parity/target/aarch64-unknown-linux-gnu/release/parity && \
/usr/bin/aarch64-linux-gnu-strip /build/parity/target/aarch64-unknown-linux-gnu/release/parity
mkdir -p .cargo && \
echo '[target.aarch64-unknown-linux-gnu]\n\
linker = "aarch64-linux-gnu-gcc"\n'\
>>.cargo/config && \
cat .cargo/config && \
cargo build --target aarch64-unknown-linux-gnu --release --verbose && \
ls /build/parity/target/aarch64-unknown-linux-gnu/release/parity && \
/usr/bin/aarch64-linux-gnu-strip /build/parity/target/aarch64-unknown-linux-gnu/release/parity
RUN file /build/parity/target/aarch64-unknown-linux-gnu/release/parity


@ -9,11 +9,12 @@ echo "CARGO_HOME: " $CARGO_HOME
echo "CARGO_TARGET: " $CARGO_TARGET
echo "CC: " $CC
echo "CXX: " $CXX
#strip ON
export RUSTFLAGS=" -C link-arg=-s"
echo "_____ Building target: "$CARGO_TARGET" _____"
if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ]
then
# only thing we need for android
time cargo build --target $CARGO_TARGET --release -p parity-clib --features final
else
time cargo build --target $CARGO_TARGET --release --features final
@ -24,14 +25,11 @@ else
fi
echo "_____ Post-processing binaries _____"
rm -rf artifacts
mkdir -p artifacts
cd artifacts
mkdir -p $CARGO_TARGET
cd $CARGO_TARGET
mkdir -p artifacts/$CARGO_TARGET
cd artifacts/$CARGO_TARGET
if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ]
then
# only thing we need for android
cp -v ../../target/$CARGO_TARGET/release/libparity.so ./libparity.so
else
cp -v ../../target/$CARGO_TARGET/release/parity ./parity
@ -41,16 +39,6 @@ else
cp -v ../../target/$CARGO_TARGET/release/whisper ./whisper
fi
# stripping can also be done on release build time
# export RUSTFLAGS="${RUSTFLAGS} -C link-arg=-s"
if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ]
then
arm-linux-androideabi-strip -v ./*
else
strip -v ./*
fi
echo "_____ Calculating checksums _____"
for binary in $(ls)
do
@ -62,4 +50,3 @@ do
./parity tools hash $binary > $binary.sha3
fi
done
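Note: the strip section removed above is superseded by the `export RUSTFLAGS=" -C link-arg=-s"` added at the top of the script; `-C link-arg=-s` passes `-s` to the linker, so symbols are stripped at link time and no separate strip pass over the artifacts is needed.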


@ -1,7 +0,0 @@
#!/bin/bash
set -e # fail on any error
set -u # treat unset variables as error
cargo install cargo-audit
cargo audit


@ -36,19 +36,3 @@ do
esac
cd ..
done
echo "__________Push binaries to AWS S3____________"
aws configure set aws_access_key_id $s3_key
aws configure set aws_secret_access_key $s3_secret
case "${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}" in
(beta|stable|nightly)
export S3_BUCKET=releases.parity.io/ethereum;
;;
(*)
export S3_BUCKET=builds-parity;
;;
esac
aws s3 sync ./ s3://$S3_BUCKET/${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}/


@ -21,7 +21,19 @@ SNAP_PACKAGE="parity_"$VERSION"_"$BUILD_ARCH".snap"
echo "__________Create snap package__________"
echo "Release channel :" $GRADE " Branch/tag: " $CI_COMMIT_REF_NAME
echo $VERSION:$GRADE:$BUILD_ARCH
cat scripts/snap/snapcraft.template.yaml | envsubst '$VERSION:$GRADE:$BUILD_ARCH:$CARGO_TARGET' > snapcraft.yaml
# cat scripts/snap/snapcraft.template.yaml | envsubst '$VERSION:$GRADE:$BUILD_ARCH:$CARGO_TARGET' > snapcraft.yaml
# a bit more necromancy (substitutions):
pwd
cd /builds/$CI_PROJECT_PATH/scripts/snap/
sed -e 's/$VERSION/'"$VERSION"'/g' \
-e 's/$GRADE/'"$GRADE"'/g' \
-e 's/$BUILD_ARCH/'"$BUILD_ARCH"'/g' \
-e 's/$CARGO_TARGET/'"$CARGO_TARGET"'/g' \
snapcraft.template.yaml > /builds/$CI_PROJECT_PATH/snapcraft.yaml
cd /builds/$CI_PROJECT_PATH
pwd
apt update
apt install -y --no-install-recommends rhash
cat snapcraft.yaml
snapcraft --target-arch=$BUILD_ARCH
ls *.snap


@ -1,6 +1,4 @@
#!/bin/bash
# ARGUMENT $1 Rust flavor to test with (stable/beta/nightly)
set -e # fail on any error
set -u # treat unset variables as error
@ -27,9 +25,6 @@ then
exit 0
fi
rustup default $1
git submodule update --init --recursive
rustup show
exec ./test.sh


@ -50,8 +50,4 @@ parts:
cp -v ethkey $SNAPCRAFT_PART_INSTALL/usr/bin/ethkey
cp -v ethstore $SNAPCRAFT_PART_INSTALL/usr/bin/ethstore
cp -v whisper $SNAPCRAFT_PART_INSTALL/usr/bin/whisper
stage-packages: [libc6, libudev1, libstdc++6, cmake, libdb]
df:
plugin: nil
stage-packages: [coreutils]
stage: [bin/df]
stage-packages: [libc6, libudev1, libstdc++6, cmake, libdb5.3]

test.sh

@ -1,33 +1,12 @@
#!/bin/sh
# Running Parity Full Test Suite
echo "________Running test.sh________"
FEATURES="json-tests,ci-skip-issue"
OPTIONS="--release"
VALIDATE=1
THREADS=8
case $1 in
--no-json)
FEATURES="ipc"
shift # past argument=value
;;
--no-release)
OPTIONS=""
shift
;;
--no-validate)
VALIDATE=0
shift
;;
--no-run)
OPTIONS="--no-run"
shift
;;
*)
# unknown option
;;
esac
set -e
@ -57,7 +36,6 @@ cpp_test () {
cd build && \
cmake .. && \
make -j $THREADS && \
./parity-example && \
cd .. && \
rm -rf build && \
cd ../..
@ -98,4 +76,3 @@ then
else
cargo_test $@
fi


@ -35,6 +35,7 @@ serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
error-chain = { version = "0.12", default-features = false }
lru-cache = "0.1"
[dev-dependencies]
tempdir = "0.3"


@ -20,6 +20,7 @@ use std::collections::{HashSet, HashMap, VecDeque};
use std::collections::hash_map::Entry;
use std::default::Default;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use lru_cache::LruCache;
use hash::keccak;
use ethereum_types::{H256, H520};
use rlp::{Rlp, RlpStream};
@ -55,6 +56,8 @@ const REQUEST_BACKOFF: [Duration; 4] = [
const NODE_LAST_SEEN_TIMEOUT: Duration = Duration::from_secs(24*60*60);
const OBSERVED_NODES_MAX_SIZE: usize = 10_000;
#[derive(Clone, Debug)]
pub struct NodeEntry {
pub id: NodeId,
@ -95,7 +98,27 @@ struct FindNodeRequest {
#[derive(Clone, Copy)]
enum PingReason {
Default,
FromDiscoveryRequest(NodeId)
FromDiscoveryRequest(NodeId, NodeValidity),
}
#[derive(Clone, Copy, PartialEq)]
enum NodeCategory {
Bucket,
Observed
}
#[derive(Clone, Copy, PartialEq)]
enum NodeValidity {
Ourselves,
ValidNode(NodeCategory),
ExpiredNode(NodeCategory),
UnknownNode
}
#[derive(Debug)]
enum BucketError {
Ourselves,
NotInTheBucket{node_entry: NodeEntry, bucket_distance: usize},
}
struct PingRequest {
@ -145,6 +168,12 @@ pub struct Discovery<'a> {
discovery_id: NodeId,
discovery_nodes: HashSet<NodeId>,
node_buckets: Vec<NodeBucket>,
// Sometimes we don't want to add nodes to the NodeTable, but still want to
// keep track of them to avoid excessive pinging (happens when an unknown node sends
// a discovery request to us -- the node might be on a different net).
other_observed_nodes: LruCache<NodeId, (NodeEndpoint, Instant)>,
in_flight_pings: HashMap<NodeId, PingRequest>,
in_flight_find_nodes: HashMap<NodeId, FindNodeRequest>,
send_queue: VecDeque<Datagram>,
@ -171,6 +200,7 @@ impl<'a> Discovery<'a> {
discovery_id: NodeId::new(),
discovery_nodes: HashSet::new(),
node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(),
other_observed_nodes: LruCache::new(OBSERVED_NODES_MAX_SIZE),
in_flight_pings: HashMap::new(),
in_flight_find_nodes: HashMap::new(),
send_queue: VecDeque::new(),
@ -200,41 +230,53 @@ impl<'a> Discovery<'a> {
}
}
fn update_node(&mut self, e: NodeEntry) -> Option<TableUpdates> {
trace!(target: "discovery", "Inserting {:?}", &e);
fn update_bucket_record(&mut self, e: NodeEntry) -> Result<(), BucketError> {
let id_hash = keccak(e.id);
let dist = match Discovery::distance(&self.id_hash, &id_hash) {
Some(dist) => dist,
None => {
debug!(target: "discovery", "Attempted to update own entry: {:?}", e);
return None;
return Err(BucketError::Ourselves);
}
};
let bucket = &mut self.node_buckets[dist];
bucket.nodes.iter_mut().find(|n| n.address.id == e.id)
.map_or(Err(BucketError::NotInTheBucket{node_entry: e.clone(), bucket_distance: dist}.into()), |entry| {
entry.address = e;
entry.last_seen = Instant::now();
entry.backoff_until = Instant::now();
entry.fail_count = 0;
Ok(())
})
}
let mut added_map = HashMap::new();
let ping = {
let bucket = &mut self.node_buckets[dist];
let updated = if let Some(node) = bucket.nodes.iter_mut().find(|n| n.address.id == e.id) {
node.address = e.clone();
node.last_seen = Instant::now();
node.backoff_until = Instant::now();
node.fail_count = 0;
true
} else { false };
fn update_node(&mut self, e: NodeEntry) -> Option<TableUpdates> {
trace!(target: "discovery", "Inserting {:?}", &e);
if !updated {
added_map.insert(e.id, e.clone());
bucket.nodes.push_front(BucketEntry::new(e));
match self.update_bucket_record(e) {
Ok(()) => None,
Err(BucketError::Ourselves) => None,
Err(BucketError::NotInTheBucket{node_entry, bucket_distance}) => Some((node_entry, bucket_distance))
}.map(|(node_entry, bucket_distance)| {
trace!(target: "discovery", "Adding a new node {:?} into our bucket {}", &node_entry, bucket_distance);
let mut added = HashMap::with_capacity(1);
added.insert(node_entry.id, node_entry.clone());
let node_to_ping = {
let bucket = &mut self.node_buckets[bucket_distance];
bucket.nodes.push_front(BucketEntry::new(node_entry));
if bucket.nodes.len() > BUCKET_SIZE {
select_bucket_ping(bucket.nodes.iter())
} else { None }
} else { None }
};
if let Some(node) = ping {
self.try_ping(node, PingReason::Default);
}
Some(TableUpdates { added: added_map, removed: HashSet::new() })
} else {
None
}
};
if let Some(node) = node_to_ping {
self.try_ping(node, PingReason::Default);
};
TableUpdates{added, removed: HashSet::new()}
})
}
/// Starts the discovery process at round 0
@ -542,10 +584,28 @@ impl<'a> Discovery<'a> {
};
if let Some((node, ping_reason)) = expected_node {
if let PingReason::FromDiscoveryRequest(target) = ping_reason {
if let PingReason::FromDiscoveryRequest(target, validity) = ping_reason {
self.respond_with_discovery(target, &node)?;
// kirushik: I would prefer to probe the network id of the remote node here, and add it to the nodes list if it's on "our" net --
// but `on_packet` happens synchronously, so doing the full TCP handshake ceremony here is a bad idea.
// So instead we just LRU-caching most recently seen nodes to avoid unnecessary pinging
match validity {
NodeValidity::ValidNode(NodeCategory::Bucket) | NodeValidity::ExpiredNode(NodeCategory::Bucket) => {
trace!(target: "discovery", "Updating node {:?} in our Kad buckets", &node);
self.update_bucket_record(node).unwrap_or_else(|error| {
debug!(target: "discovery", "Error occured when processing ping from a bucket node: {:?}", &error);
});
},
NodeValidity::UnknownNode | NodeValidity::ExpiredNode(NodeCategory::Observed) | NodeValidity::ValidNode(NodeCategory::Observed) => {
trace!(target: "discovery", "Updating node {:?} in the list of other_observed_nodes", &node);
self.other_observed_nodes.insert(node.id, (node.endpoint, Instant::now()));
},
NodeValidity::Ourselves => (),
}
Ok(None)
} else {
Ok(self.update_node(node))
}
Ok(self.update_node(node))
} else {
debug!(target: "discovery", "Got unexpected Pong from {:?} ; request not found", &from);
Ok(None)
@ -566,31 +626,41 @@ impl<'a> Discovery<'a> {
}
};
if self.is_a_valid_known_node(&node) {
self.respond_with_discovery(target, &node)?;
} else {
match self.check_validity(&node) {
NodeValidity::Ourselves => (), // It makes no sense to respond to the discovery request from ourselves
NodeValidity::ValidNode(_) => self.respond_with_discovery(target, &node)?,
// Make sure the request source is actually there and responds to pings before actually responding
self.try_ping(node, PingReason::FromDiscoveryRequest(target));
invalidity_reason => self.try_ping(node, PingReason::FromDiscoveryRequest(target, invalidity_reason))
}
Ok(None)
}
fn is_a_valid_known_node(&self, node: &NodeEntry) -> bool {
fn check_validity(&mut self, node: &NodeEntry) -> NodeValidity {
let id_hash = keccak(node.id);
let dist = match Discovery::distance(&self.id_hash, &id_hash) {
Some(dist) => dist,
None => {
debug!(target: "discovery", "Got an incoming discovery request from self: {:?}", node);
return false;
return NodeValidity::Ourselves;
}
};
let bucket = &self.node_buckets[dist];
if let Some(known_node) = bucket.nodes.iter().find(|n| n.address.id == node.id) {
debug!(target: "discovery", "Found a known node in a bucket when processing discovery: {:?}/{:?}", known_node, node);
(known_node.address.endpoint == node.endpoint) && (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT)
match ((known_node.address.endpoint == node.endpoint), (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT)) {
(true, true) => NodeValidity::ValidNode(NodeCategory::Bucket),
(true, false) => NodeValidity::ExpiredNode(NodeCategory::Bucket),
_ => NodeValidity::UnknownNode
}
} else {
false
self.other_observed_nodes.get_mut(&node.id).map_or(NodeValidity::UnknownNode, |(endpoint, observed_at)| {
match ((node.endpoint == *endpoint), (observed_at.elapsed() < NODE_LAST_SEEN_TIMEOUT)) {
(true, true) => NodeValidity::ValidNode(NodeCategory::Observed),
(true, false) => NodeValidity::ExpiredNode(NodeCategory::Observed),
_ => NodeValidity::UnknownNode
}
})
}
}
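For reference, a small standalone sketch of the LRU-bounded tracking behind `other_observed_nodes` (assumes the same `lru-cache 0.1` crate added to Cargo.toml above; node ids simplified to integers):

extern crate lru_cache;

use lru_cache::LruCache;
use std::time::Instant;

fn main() {
    // The diff uses OBSERVED_NODES_MAX_SIZE = 10_000; capacity 3 keeps the
    // demo short. Once full, each insert evicts the least recently used
    // entry, so memory stays bounded even if strangers flood us with
    // discovery requests.
    let mut observed: LruCache<u64, Instant> = LruCache::new(3);
    for id in 0..4u64 {
        observed.insert(id, Instant::now());
    }
    assert!(observed.get_mut(&0).is_none()); // oldest entry was evicted
    assert!(observed.get_mut(&3).is_some()); // most recent entry survives
}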


@ -85,6 +85,7 @@ extern crate keccak_hash as hash;
extern crate serde;
extern crate serde_json;
extern crate parity_snappy as snappy;
extern crate lru_cache;
#[macro_use]
extern crate error_chain;


@ -3,7 +3,7 @@
[package]
name = "parity-version"
# NOTE: this value is used for Parity Ethereum version string (via env CARGO_PKG_VERSION)
version = "2.2.9"
version = "2.2.10"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"