diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a1a1c9799..a4109ad85 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,11 +2,14 @@ stages: - test - build - publish + - publish-onchain - optional image: parity/rust:gitlab-ci variables: + GIT_STRATEGY: fetch + GIT_SUBMODULE_STRATEGY: recursive CI_SERVER_NAME: "GitLab CI" CARGO_HOME: "${CI_PROJECT_DIR}/.cargo" CARGO_TARGET: x86_64-unknown-linux-gnu @@ -40,25 +43,29 @@ test-linux: variables: RUN_TESTS: all script: - - scripts/gitlab/test-all.sh stable + - scripts/gitlab/test-all.sh + - sccache -s tags: - - rust-stable + - linux-docker test-audit: stage: test script: - - scripts/gitlab/cargo-audit.sh + - set -e + - set -u + - cargo audit tags: - - rust-stable + - linux-docker build-linux: stage: build only: *releaseable_branches script: - scripts/gitlab/build-unix.sh + - sccache -s <<: *collect_artifacts tags: - - rust-stable + - linux-docker build-darwin: stage: build @@ -96,9 +103,9 @@ publish-docker: - scripts/gitlab/publish-docker.sh parity publish-snap: - stage: publish + stage: optional #publish only: *releaseable_branches - image: parity/snapcraft:gitlab-ci + image: snapcore/snapcraft variables: BUILD_ARCH: amd64 cache: {} @@ -112,19 +119,66 @@ publish-snap: allow_failure: true <<: *collect_artifacts -publish-awss3: - stage: publish +publish-onnet-update: + stage: publish-onchain only: *releaseable_branches - cache: {} + cache: {} dependencies: - build-linux - build-darwin - build-windows + - publish-awss3-release before_script: *determine_version script: - - scripts/gitlab/publish-awss3.sh + - scripts/gitlab/publish-onnet-update.sh tags: - - shell + - linux-docker + +# configures aws for fast uploads/syncs +.s3-before-script: &s3-before-script + before_script: + - mkdir -p ${HOME}/.aws + - | + cat > ${HOME}/.aws/config <"] diff --git a/ethcore/res/ethereum/st_peters_test.json b/ethcore/res/ethereum/st_peters_test.json new file mode 100644 index 000000000..ee88008f6 --- /dev/null +++ b/ethcore/res/ethereum/st_peters_test.json @@ -0,0 +1,65 @@ +{ + "name": "St. 
Peter's (test)", + "engine": { + "Ethash": { + "params": { + "minimumDifficulty": "0x020000", + "difficultyBoundDivisor": "0x0800", + "durationLimit": "0x0d", + "blockReward": "0x1BC16D674EC80000", + "homesteadTransition": "0x0", + "eip100bTransition": "0x0", + "difficultyBombDelays": { + "0": 5000000 + } + } + } + }, + "params": { + "gasLimitBoundDivisor": "0x0400", + "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", + "accountStartNonce": "0x00", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x1", + "maxCodeSize": 24576, + "maxCodeSizeTransition": "0x0", + "eip150Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip140Transition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip155Transition": "0x0", + "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip1283DisableTransition": "0x0" + }, + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x0000000000000042", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x400000000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", + "gasLimit": "0x1388" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": "0x00", "pricing": { "modexp": { "divisor": 20 } } } }, + "0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": "0x00", "pricing": { "linear": { "base": 500, "word": 0 } } } }, + "0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": "0x00", "pricing": { "linear": { "base": 40000, "word": 0 } } } }, + "0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": "0x00", "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } } + } +} diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index 420f44347..725dbc73a 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit 420f443477caa8516f1f9ee8122fafc3415c0f34 +Subproject commit 725dbc73a54649e22a00330bd0f4d6699a5060e5 diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs index 20a04613a..8ffc7d05b 100644 --- a/ethcore/src/client/evm_test_client.rs +++ b/ethcore/src/client/evm_test_client.rs @@ -87,6 +87,7 @@ impl<'a> EvmTestClient<'a> { ForkSpec::EIP158 => Some(ethereum::new_eip161_test()), ForkSpec::Byzantium => Some(ethereum::new_byzantium_test()), ForkSpec::Constantinople => Some(ethereum::new_constantinople_test()), + 
ForkSpec::ConstantinopleFix => Some(ethereum::new_constantinople_fix_test()), ForkSpec::EIP158ToByzantiumAt5 => Some(ethereum::new_transition_test()), ForkSpec::FrontierToHomesteadAt5 | ForkSpec::HomesteadToDaoAt5 | ForkSpec::HomesteadToEIP150At5 => None, } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 02268747a..1b00247ed 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -89,6 +89,8 @@ pub enum BlockError { InvalidNumber(Mismatch<BlockNumber>), /// Block number isn't sensible. RidiculousNumber(OutOfBounds<BlockNumber>), + /// Timestamp header overflowed + TimestampOverflow, /// Too many transactions from a particular address. TooManyTransactions(Address), /// Parent given is unknown. @@ -138,6 +140,7 @@ impl fmt::Display for BlockError { UnknownParent(ref hash) => format!("Unknown parent: {}", hash), UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash), UnknownEpochTransition(ref num) => format!("Unknown transition to epoch number: {}", num), + TimestampOverflow => format!("Timestamp overflow"), TooManyTransactions(ref address) => format!("Too many transactions from: {}", address), }; diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index b024d3420..dbac07a61 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -143,6 +143,9 @@ pub fn new_byzantium_test() -> Spec { load(None, include_bytes!("../../res/ether /// Create a new Foundation Constantinople era spec. pub fn new_constantinople_test() -> Spec { load(None, include_bytes!("../../res/ethereum/constantinople_test.json")) } +/// Create a new Foundation St. Peter's (Constantinople Fix) era spec. +pub fn new_constantinople_fix_test() -> Spec { load(None, include_bytes!("../../res/ethereum/st_peters_test.json")) } + /// Create a new Musicoin-MCIP3-era spec. pub fn new_mcip3_test() -> Spec { load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) } @@ -163,6 +166,9 @@ pub fn new_byzantium_test_machine() -> EthereumMachine { load_machine(include_by /// Create a new Foundation Constantinople era spec. pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/constantinople_test.json")) } +/// Create a new Foundation St. Peter's (Constantinople Fix) era spec. +pub fn new_constantinople_fix_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/st_peters_test.json")) } + /// Create a new Musicoin-MCIP3-era spec. 
pub fn new_mcip3_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) } diff --git a/ethcore/src/json_tests/state.rs b/ethcore/src/json_tests/state.rs index f402a7b37..88b0c70d5 100644 --- a/ethcore/src/json_tests/state.rs +++ b/ethcore/src/json_tests/state.rs @@ -165,6 +165,7 @@ mod state_tests { declare_test!{GeneralStateTest_stRefundTest, "GeneralStateTests/stRefundTest/"} declare_test!{GeneralStateTest_stReturnDataTest, "GeneralStateTests/stReturnDataTest/"} declare_test!{GeneralStateTest_stRevertTest, "GeneralStateTests/stRevertTest/"} + declare_test!{GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"} declare_test!{GeneralStateTest_stShift, "GeneralStateTests/stShift/"} declare_test!{GeneralStateTest_stSolidityTest, "GeneralStateTests/stSolidityTest/"} declare_test!{GeneralStateTest_stSpecialTest, "GeneralStateTests/stSpecialTest/"} @@ -177,7 +178,6 @@ mod state_tests { declare_test!{GeneralStateTest_stZeroCallsRevert, "GeneralStateTests/stZeroCallsRevert/"} declare_test!{GeneralStateTest_stZeroCallsTest, "GeneralStateTests/stZeroCallsTest/"} declare_test!{GeneralStateTest_stZeroKnowledge, "GeneralStateTests/stZeroKnowledge/"} - declare_test!{GeneralStateTest_stSStoreTest, "GeneralStateTests/stSStoreTest/"} // Attempts to send a transaction that requires more than current balance: // Tx: diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 3bfe30a44..ddceeba27 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -39,6 +39,25 @@ use header::{BlockNumber, Header}; use transaction::SignedTransaction; use verification::queue::kind::blocks::Unverified; + +/// Returns `Ok(SystemTime)` when the result is less than or equal to `i32::max_value()`, to prevent `SystemTime` from panicking because +/// its representation is platform specific and may be i32 or i64. 
+/// +/// `Err(BlockError::TimestampOverflow)` otherwise. +fn timestamp_checked_add(sys: SystemTime, d2: Duration) -> Result<SystemTime, BlockError> { + let d1 = sys.duration_since(UNIX_EPOCH).map_err(|_| BlockError::TimestampOverflow)?; + let total_time = d1.checked_add(d2).ok_or(BlockError::TimestampOverflow)?; + + if total_time.as_secs() <= i32::max_value() as u64 { + Ok(sys + d2) + } else { + Err(BlockError::TimestampOverflow) + } +} + /// Preprocessed block data gathered in `verify_block_unordered` call pub struct PreverifiedBlock { /// Populated block header @@ -305,7 +324,7 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool, const ACCEPTABLE_DRIFT: Duration = Duration::from_secs(15); let max_time = SystemTime::now() + ACCEPTABLE_DRIFT; let invalid_threshold = max_time + ACCEPTABLE_DRIFT * 9; - let timestamp = UNIX_EPOCH + Duration::from_secs(header.timestamp()); + let timestamp = timestamp_checked_add(UNIX_EPOCH, Duration::from_secs(header.timestamp()))?; if timestamp > invalid_threshold { return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: Some(max_time), min: None, found: timestamp }))) @@ -327,8 +346,8 @@ fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result let gas_limit_divisor = engine.params().gas_limit_bound_divisor; if !engine.is_timestamp_valid(header.timestamp(), parent.timestamp()) { - let min = SystemTime::now() + Duration::from_secs(parent.timestamp() + 1); - let found = SystemTime::now() + Duration::from_secs(header.timestamp()); + let min = timestamp_checked_add(SystemTime::now(), Duration::from_secs(parent.timestamp().saturating_add(1)))?; + let found = timestamp_checked_add(SystemTime::now(), Duration::from_secs(header.timestamp()))?; return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(min), found }))) } if header.number() != parent.number() + 1 { @@ -734,7 +753,8 @@ mod tests { check_fail_timestamp(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), false); header = good.clone(); - header.set_timestamp(2450000000); + // will return `BlockError::TimestampOverflow` when timestamp > `i32::max_value()` + header.set_timestamp(i32::max_value() as u64); check_fail_timestamp(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), false); header = good.clone(); @@ -806,4 +826,11 @@ mod tests { check_fail(unordered_test(&create_test_block_with_data(&header, &bad_transactions, &[]), &engine), TooManyTransactions(keypair.address())); unordered_test(&create_test_block_with_data(&header, &good_transactions, &[]), &engine).unwrap(); } + + #[test] + fn checked_add_systime_dur() { + assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64 + 1, 0)).is_err()); + assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64, 0)).is_ok()); + assert!(timestamp_checked_add(UNIX_EPOCH, Duration::new(i32::max_value() as u64 - 1, 1_000_000_000)).is_ok()); + } } diff --git a/json/src/spec/spec.rs b/json/src/spec/spec.rs index e0a869891..43e35982e 100644 --- a/json/src/spec/spec.rs +++ b/json/src/spec/spec.rs @@ -30,6 +30,7 @@ pub enum ForkSpec { Homestead, Byzantium, Constantinople, + ConstantinopleFix, EIP158ToByzantiumAt5, FrontierToHomesteadAt5, HomesteadToDaoAt5, diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 67f3c9fd4..43a1d5dbc 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -41,11 +41,11 @@ use sync::LightSync; use transaction::{Action, SignedTransaction, PendingTransaction, Transaction, Error as 
TransactionError}; use jsonrpc_core::{BoxFuture, Result, Error}; -use jsonrpc_core::futures::{future, Future, Poll, Async}; +use jsonrpc_core::futures::{future, Future, Poll, Async, IntoFuture}; use jsonrpc_core::futures::future::Either; use v1::helpers::{errors, nonce, TransactionRequest, FilledTransactionRequest, ConfirmationPayload}; use v1::types::{ - H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes, + H520 as RpcH520, Bytes as RpcBytes, RichRawTransaction as RpcRichRawTransaction, ConfirmationPayload as RpcConfirmationPayload, ConfirmationResponse, @@ -69,12 +69,20 @@ pub trait Dispatcher: Send + Sync + Clone { fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool) -> BoxFuture<FilledTransactionRequest>; - /// Sign the given transaction request without dispatching, fetching appropriate nonce. - fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith) - -> BoxFuture<WithToken<SignedTransaction>>; + /// Sign the given transaction request, fetching appropriate nonce and executing the PostSign action + fn sign
<P>
( + &self, + accounts: Arc<AccountProvider>, + filled: FilledTransactionRequest, + password: SignWith, + post_sign: P + ) -> BoxFuture<P::Item> + where + P: PostSign + 'static, + <P::Out as IntoFuture>::Future: Send; /// Converts a `SignedTransaction` into `RichRawTransaction` - fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction; + fn enrich(&self, signed: SignedTransaction) -> RpcRichRawTransaction; /// "Dispatch" a local transaction. fn dispatch_transaction(&self, signed_transaction: PendingTransaction) @@ -164,19 +172,30 @@ impl Dispatcher })) } - fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith) - -> BoxFuture<WithToken<SignedTransaction>> + fn sign
<P>
( + &self, + accounts: Arc<AccountProvider>, + filled: FilledTransactionRequest, + password: SignWith, + post_sign: P + ) -> BoxFuture<P::Item> + where + P: PostSign + 'static, + <P::Out as IntoFuture>::Future: Send { let chain_id = self.client.signing_chain_id(); if let Some(nonce) = filled.nonce { - return Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password))); + let future = sign_transaction(&*accounts, filled, chain_id, nonce, password) + .into_future() + .and_then(move |signed| post_sign.execute(signed)); + Box::new(future) + } else { + let state = self.state_nonce(&filled.from); + let reserved = self.nonces.lock().reserve(filled.from, state); + + Box::new(ProspectiveSigner::new(accounts, filled, chain_id, reserved, password, post_sign)) } - - let state = self.state_nonce(&filled.from); - let reserved = self.nonces.lock().reserve(filled.from, state); - - Box::new(ProspectiveSigner::new(accounts, filled, chain_id, reserved, password)) } fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { @@ -396,12 +415,24 @@ impl Dispatcher for LightDispatcher { })) } - fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith) - -> BoxFuture<WithToken<SignedTransaction>> + fn sign
<P>
( + &self, + accounts: Arc<AccountProvider>, + filled: FilledTransactionRequest, + password: SignWith, + post_sign: P + ) -> BoxFuture<P::Item> + where + P: PostSign + 'static, + <P::Out as IntoFuture>::Future: Send { let chain_id = self.client.signing_chain_id(); let nonce = filled.nonce.expect("nonce is always provided; qed"); - Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password))) + + let future = sign_transaction(&*accounts, filled, chain_id, nonce, password) + .into_future() + .and_then(move |signed| post_sign.execute(signed)); + Box::new(future) } fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction { @@ -449,28 +480,60 @@ fn sign_transaction( #[derive(Debug, Clone, Copy)] enum ProspectiveSignerState { TryProspectiveSign, + WaitForPostSign, WaitForNonce, - Finish, } -struct ProspectiveSigner { +struct ProspectiveSigner<P: PostSign> { accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, chain_id: Option<u64>, reserved: nonce::Reserved, password: SignWith, state: ProspectiveSignerState, - prospective: Option<Result<WithToken<SignedTransaction>>>, + prospective: Option<WithToken<SignedTransaction>>, ready: Option<nonce::Ready>, + post_sign: Option
<P>
, + post_sign_future: Option<<P::Out as IntoFuture>::Future> } -impl ProspectiveSigner { +/// action to execute after signing +/// e.g. importing a transaction into the chain +pub trait PostSign: Send { + /// item that this PostSign returns + type Item: Send; + /// in case you need to perform async PostSign actions + type Out: IntoFuture<Item = Self::Item, Error = Error> + Send; + /// perform an action with the signed transaction + fn execute(self, signer: WithToken<SignedTransaction>) -> Self::Out; +} + +impl PostSign for () { + type Item = WithToken<SignedTransaction>; + type Out = Result<Self::Item>; + fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out { + Ok(signed) + } +} + +impl<F: Send, T: Send> PostSign for F + where F: FnOnce(WithToken<SignedTransaction>) -> Result<T> +{ + type Item = T; + type Out = Result<T>; + fn execute(self, signed: WithToken<SignedTransaction>) -> Self::Out { + (self)(signed) + } +} + +impl<P: PostSign> ProspectiveSigner
<P>
{ pub fn new( accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, chain_id: Option<u64>, reserved: nonce::Reserved, password: SignWith, + post_sign: P ) -> Self { // If the account is permanently unlocked we can try to sign // using prospective nonce. This should speed up sending @@ -491,6 +554,8 @@ impl ProspectiveSigner { }, prospective: None, ready: None, + post_sign: Some(post_sign), + post_sign_future: None } } @@ -509,8 +574,8 @@ impl ProspectiveSigner { } } -impl Future for ProspectiveSigner { - type Item = WithToken<SignedTransaction>; +impl<P: PostSign> Future for ProspectiveSigner
<P>
{ + type Item = P::Item; type Error = Error; fn poll(&mut self) -> Poll { @@ -523,32 +588,45 @@ impl Future for ProspectiveSigner { match self.poll_reserved()? { Async::NotReady => { self.state = WaitForNonce; - self.prospective = Some(self.sign(self.reserved.prospective_value())); + self.prospective = Some(self.sign(self.reserved.prospective_value())?); }, Async::Ready(nonce) => { - self.state = Finish; - self.prospective = Some(self.sign(nonce.value())); + self.state = WaitForPostSign; + self.post_sign_future = Some(self.post_sign.take() + .expect("post_sign is set on creation; qed") + .execute(self.sign(nonce.value())?) + .into_future()); self.ready = Some(nonce); }, } }, WaitForNonce => { let nonce = try_ready!(self.poll_reserved()); - let result = match (self.prospective.take(), nonce.matches_prospective()) { + let prospective = match (self.prospective.take(), nonce.matches_prospective()) { (Some(prospective), true) => prospective, - _ => self.sign(nonce.value()), + _ => self.sign(nonce.value())?, }; - self.state = Finish; - self.prospective = Some(result); self.ready = Some(nonce); + self.state = WaitForPostSign; + self.post_sign_future = Some(self.post_sign.take() + .expect("post_sign is set on creation; qed") + .execute(prospective) + .into_future()); }, - Finish => { - if let (Some(result), Some(nonce)) = (self.prospective.take(), self.ready.take()) { - // Mark nonce as used on successful signing - return result.map(move |tx| { - nonce.mark_used(); - Async::Ready(tx) - }) + WaitForPostSign => { + if let Some(mut fut) = self.post_sign_future.as_mut() { + match fut.poll()? { + Async::Ready(item) => { + let nonce = self.ready + .take() + .expect("nonce is set before state transitions to WaitForPostSign; qed"); + nonce.mark_used(); + return Ok(Async::Ready(item)) + }, + Async::NotReady => { + return Ok(Async::NotReady) + } + } } else { panic!("Poll after ready."); } @@ -655,19 +733,21 @@ pub fn execute( match payload { ConfirmationPayload::SendTransaction(request) => { let condition = request.condition.clone().map(Into::into); - Box::new(dispatcher.sign(accounts, request, pass) - .map(move |v| v.map(move |tx| PendingTransaction::new(tx, condition))) - .map(WithToken::into_tuple) - .map(|(tx, token)| (tx, token, dispatcher)) - .and_then(|(tx, tok, dispatcher)| { - dispatcher.dispatch_transaction(tx) - .map(RpcH256::from) - .map(ConfirmationResponse::SendTransaction) - .map(move |h| WithToken::from((h, tok))) - })) + let cloned_dispatcher = dispatcher.clone(); + let post_sign = move |with_token_signed: WithToken| { + let (signed, token) = with_token_signed.into_tuple(); + let signed_transaction = PendingTransaction::new(signed, condition); + cloned_dispatcher.dispatch_transaction(signed_transaction) + .map(|hash| (hash, token)) + }; + let future = dispatcher.sign(accounts, request, pass, post_sign) + .map(|(hash, token)| { + WithToken::from((ConfirmationResponse::SendTransaction(hash.into()), token)) + }); + Box::new(future) }, ConfirmationPayload::SignTransaction(request) => { - Box::new(dispatcher.sign(accounts, request, pass) + Box::new(dispatcher.sign(accounts, request, pass, ()) .map(move |result| result .map(move |tx| dispatcher.enrich(tx)) .map(ConfirmationResponse::SignTransaction) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index e42a91b1f..57a2e8e40 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::time::Duration; -use bytes::{Bytes, ToPretty}; +use bytes::Bytes; use 
ethcore::account_provider::AccountProvider; use transaction::PendingTransaction; use ethereum_types::{H520, U128, Address}; @@ -27,7 +27,7 @@ use ethkey::{public_to_address, recover, Signature}; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_core::futures::{future, Future}; use v1::helpers::{errors, eip191}; -use v1::helpers::dispatch::{self, eth_data_hash, Dispatcher, SignWith}; +use v1::helpers::dispatch::{self, eth_data_hash, Dispatcher, SignWith, PostSign, WithToken}; use v1::traits::Personal; use v1::types::{ H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U128 as RpcU128, @@ -41,6 +41,7 @@ use v1::types::{ use v1::metadata::Metadata; use eip712::{EIP712, hash_structured_data}; use jsonrpc_core::types::Value; +use transaction::SignedTransaction; /// Account management (personal) rpc implementation. pub struct PersonalClient { @@ -68,7 +69,16 @@ impl PersonalClient { } impl PersonalClient { - fn do_sign_transaction(&self, _meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<(PendingTransaction, D)> { + fn do_sign_transaction
<P>
( + &self, + _meta: Metadata, + request: TransactionRequest, + password: String, + post_sign: P + ) -> BoxFuture + where P: PostSign + 'static, + ::Future: Send + { let dispatcher = self.dispatcher.clone(); let accounts = self.accounts.clone(); @@ -86,11 +96,7 @@ impl PersonalClient { Box::new(dispatcher.fill_optional_fields(request.into(), default, false) .and_then(move |filled| { - let condition = filled.condition.clone().map(Into::into); - dispatcher.sign(accounts, filled, SignWith::Password(password.into())) - .map(|tx| tx.into_value()) - .map(move |tx| PendingTransaction::new(tx, condition)) - .map(move |tx| (tx, dispatcher)) + dispatcher.sign(accounts, filled, SignWith::Password(password.into()), post_sign) }) ) } @@ -223,18 +229,26 @@ impl Personal for PersonalClient { } fn sign_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture { - Box::new(self.do_sign_transaction(meta, request, password) - .map(|(pending_tx, dispatcher)| dispatcher.enrich(pending_tx.transaction))) + let condition = request.condition.clone().map(Into::into); + let dispatcher = self.dispatcher.clone(); + Box::new(self.do_sign_transaction(meta, request, password, ()) + .map(move |tx| PendingTransaction::new(tx.into_value(), condition)) + .map(move |pending_tx| dispatcher.enrich(pending_tx.transaction))) } fn send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture { - Box::new(self.do_sign_transaction(meta, request, password) - .and_then(|(pending_tx, dispatcher)| { - let chain_id = pending_tx.chain_id(); - trace!(target: "miner", "send_transaction: dispatching tx: {} for chain ID {:?}", - ::rlp::encode(&*pending_tx).pretty(), chain_id); - - dispatcher.dispatch_transaction(pending_tx).map(Into::into) + let condition = request.condition.clone().map(Into::into); + let dispatcher = self.dispatcher.clone(); + Box::new(self.do_sign_transaction(meta, request, password, move |signed: WithToken| { + dispatcher.dispatch_transaction( + PendingTransaction::new( + signed.into_value(), + condition + ) + ) + }) + .and_then(|hash| { + Ok(RpcH256::from(hash)) }) ) } diff --git a/scripts/docker/hub/Dockerfile b/scripts/docker/hub/Dockerfile index e9813754b..c2249a56d 100644 --- a/scripts/docker/hub/Dockerfile +++ b/scripts/docker/hub/Dockerfile @@ -1,8 +1,5 @@ FROM ubuntu:xenial -MAINTAINER Parity Technologies -#set ENVIROMENT -ARG TARGET -ENV TARGET ${TARGET} +LABEL MAINTAINER="Parity Technologies " # install tools and dependencies RUN apt update && apt install -y --no-install-recommends openssl libudev-dev file curl jq @@ -10,31 +7,25 @@ RUN apt update && apt install -y --no-install-recommends openssl libudev-dev fil # show backtraces ENV RUST_BACKTRACE 1 -#cleanup Docker image -RUN apt autoremove -y -RUN apt clean -y -RUN rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/* +# cleanup Docker image +RUN apt autoremove -y \ + && apt clean -y \ + && rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/* RUN groupadd -g 1000 parity \ && useradd -m -u 1000 -g parity -s /bin/sh parity - WORKDIR /home/parity -ENV PATH "~/bin:${PATH}" - -#add TARGET to docker image -COPY artifacts/x86_64-unknown-linux-gnu/$TARGET ./bin/$TARGET - -# Build a shell script because the ENTRYPOINT command doesn't like using ENV -RUN echo "#!/bin/bash \n ${TARGET} \$@" > ./entrypoint.sh -RUN chmod +x ./entrypoint.sh +# add parity-ethereum to docker image +COPY artifacts/x86_64-unknown-linux-gnu/parity /bin/parity COPY scripts/docker/hub/check_sync.sh /check_sync.sh # switch to user parity 
here USER parity -# setup ENTRYPOINT +VOLUME [ "/home/parity/.local/share/io.parity.ethereum" ] EXPOSE 5001 8080 8082 8083 8545 8546 8180 30303/tcp 30303/udp -ENTRYPOINT ["./entrypoint.sh"] + +ENTRYPOINT ["/bin/parity"] diff --git a/scripts/docker/ubuntu-aarch64/Dockerfile b/scripts/docker/ubuntu-aarch64/Dockerfile index cd8320530..53eb325ac 100644 --- a/scripts/docker/ubuntu-aarch64/Dockerfile +++ b/scripts/docker/ubuntu-aarch64/Dockerfile @@ -1,14 +1,30 @@ -FROM ubuntu:14.04 +FROM ubuntu:xenial WORKDIR /build +# install aarch64(armv8) dependencies and tools +RUN dpkg --add-architecture arm64 +RUN echo '# source urls for arm64 \n\ + deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial main \n\ + deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial main \n\ + deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-updates main \n\ + deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-updates main \n\ + deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-security main \n\ + deb-src [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ xenial-security main \n # end arm64 section' >> /etc/apt/sources.list &&\ + sed -r 's/deb h/deb \[arch=amd64\] h/g' /etc/apt/sources.list > /tmp/sources-tmp.list && \ + cp /tmp/sources-tmp.list /etc/apt/sources.list&& \ + sed -r 's/deb-src h/deb-src \[arch=amd64\] h/g' /etc/apt/sources.list > /tmp/sources-tmp.list&&cat /etc/apt/sources.list &&\ + cp /tmp/sources-tmp.list /etc/apt/sources.list&& echo "next"&&cat /etc/apt/sources.list + # install tools and dependencies RUN apt-get -y update && \ - apt-get install -y --force-yes --no-install-recommends \ - curl git make g++ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \ - libc6-arm64-cross libc6-dev-arm64-cross wget file ca-certificates \ - binutils-aarch64-linux-gnu cmake3 libudev-dev \ - && \ - apt-get clean + apt-get upgrade -y && \ + apt-get install -y --no-install-recommends \ + curl make cmake file ca-certificates \ + g++ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \ + libc6-dev-arm64-cross binutils-aarch64-linux-gnu \ + libudev-dev libudev-dev:arm64 \ + && \ + apt-get clean # install rustup RUN curl https://sh.rustup.rs -sSf | sh -s -- -y @@ -30,14 +46,14 @@ RUN rustc -vV && cargo -V # build parity ADD . 
/build/parity RUN cd parity && \ - mkdir -p .cargo && \ - echo '[target.aarch64-unknown-linux-gnu]\n\ - linker = "aarch64-linux-gnu-gcc"\n'\ - >>.cargo/config && \ - cat .cargo/config && \ - cargo build --target aarch64-unknown-linux-gnu --release --verbose && \ - ls /build/parity/target/aarch64-unknown-linux-gnu/release/parity && \ - /usr/bin/aarch64-linux-gnu-strip /build/parity/target/aarch64-unknown-linux-gnu/release/parity + mkdir -p .cargo && \ + echo '[target.aarch64-unknown-linux-gnu]\n\ + linker = "aarch64-linux-gnu-gcc"\n'\ + >>.cargo/config && \ + cat .cargo/config && \ + cargo build --target aarch64-unknown-linux-gnu --release --verbose && \ + ls /build/parity/target/aarch64-unknown-linux-gnu/release/parity && \ + /usr/bin/aarch64-linux-gnu-strip /build/parity/target/aarch64-unknown-linux-gnu/release/parity RUN file /build/parity/target/aarch64-unknown-linux-gnu/release/parity diff --git a/scripts/gitlab/build-unix.sh b/scripts/gitlab/build-unix.sh index 9bb6cd0f3..6244dc846 100755 --- a/scripts/gitlab/build-unix.sh +++ b/scripts/gitlab/build-unix.sh @@ -9,11 +9,12 @@ echo "CARGO_HOME: " $CARGO_HOME echo "CARGO_TARGET: " $CARGO_TARGET echo "CC: " $CC echo "CXX: " $CXX +#strip ON +export RUSTFLAGS=" -C link-arg=-s" echo "_____ Building target: "$CARGO_TARGET" _____" if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ] then -# only thing we need for android time cargo build --target $CARGO_TARGET --release -p parity-clib --features final else time cargo build --target $CARGO_TARGET --release --features final @@ -24,14 +25,11 @@ else fi echo "_____ Post-processing binaries _____" -rm -rf artifacts -mkdir -p artifacts -cd artifacts -mkdir -p $CARGO_TARGET -cd $CARGO_TARGET +mkdir -p artifacts/$CARGO_TARGET +cd artifacts/$CARGO_TARGET + if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ] then -# only thing we need for android cp -v ../../target/$CARGO_TARGET/release/libparity.so ./libparity.so else cp -v ../../target/$CARGO_TARGET/release/parity ./parity @@ -41,16 +39,6 @@ else cp -v ../../target/$CARGO_TARGET/release/whisper ./whisper fi - -# stripping can also be done on release build time -# export RUSTFLAGS="${RUSTFLAGS} -C link-arg=-s" -if [ "${CARGO_TARGET}" = "armv7-linux-androideabi" ] -then - arm-linux-androideabi-strip -v ./* -else - strip -v ./* -fi - echo "_____ Calculating checksums _____" for binary in $(ls) do @@ -62,4 +50,3 @@ do ./parity tools hash $binary > $binary.sha3 fi done - diff --git a/scripts/gitlab/cargo-audit.sh b/scripts/gitlab/cargo-audit.sh deleted file mode 100755 index 16f0dc934..000000000 --- a/scripts/gitlab/cargo-audit.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -e # fail on any error -set -u # treat unset variables as error - -cargo install cargo-audit -cargo audit diff --git a/scripts/gitlab/publish-awss3.sh b/scripts/gitlab/publish-onnet-update.sh similarity index 73% rename from scripts/gitlab/publish-awss3.sh rename to scripts/gitlab/publish-onnet-update.sh index af768a632..588cbdfb5 100755 --- a/scripts/gitlab/publish-awss3.sh +++ b/scripts/gitlab/publish-onnet-update.sh @@ -36,19 +36,3 @@ do esac cd .. 
done - -echo "__________Push binaries to AWS S3____________" -aws configure set aws_access_key_id $s3_key -aws configure set aws_secret_access_key $s3_secret - -case "${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}" in - (beta|stable|nightly) - export S3_BUCKET=releases.parity.io/ethereum; - ;; - (*) - export S3_BUCKET=builds-parity; - ;; -esac - -aws s3 sync ./ s3://$S3_BUCKET/${SCHEDULE_TAG:-${CI_COMMIT_REF_NAME}}/ - diff --git a/scripts/gitlab/publish-snap.sh b/scripts/gitlab/publish-snap.sh index f001bbff0..386abdf37 100755 --- a/scripts/gitlab/publish-snap.sh +++ b/scripts/gitlab/publish-snap.sh @@ -21,7 +21,19 @@ SNAP_PACKAGE="parity_"$VERSION"_"$BUILD_ARCH".snap" echo "__________Create snap package__________" echo "Release channel :" $GRADE " Branch/tag: " $CI_COMMIT_REF_NAME echo $VERSION:$GRADE:$BUILD_ARCH -cat scripts/snap/snapcraft.template.yaml | envsubst '$VERSION:$GRADE:$BUILD_ARCH:$CARGO_TARGET' > snapcraft.yaml +# cat scripts/snap/snapcraft.template.yaml | envsubst '$VERSION:$GRADE:$BUILD_ARCH:$CARGO_TARGET' > snapcraft.yaml +# a bit more necromancy (substitutions): +pwd +cd /builds/$CI_PROJECT_PATH/scripts/snap/ +sed -e 's/$VERSION/'"$VERSION"'/g' \ + -e 's/$GRADE/'"$GRADE"'/g' \ + -e 's/$BUILD_ARCH/'"$BUILD_ARCH"'/g' \ + -e 's/$CARGO_TARGET/'"$CARGO_TARGET"'/g' \ + snapcraft.template.yaml > /builds/$CI_PROJECT_PATH/snapcraft.yaml +cd /builds/$CI_PROJECT_PATH +pwd +apt update +apt install -y --no-install-recommends rhash cat snapcraft.yaml snapcraft --target-arch=$BUILD_ARCH ls *.snap diff --git a/scripts/gitlab/test-all.sh b/scripts/gitlab/test-all.sh index 15c228706..925124b7a 100755 --- a/scripts/gitlab/test-all.sh +++ b/scripts/gitlab/test-all.sh @@ -1,6 +1,4 @@ #!/bin/bash -# ARGUMENT $1 Rust flavor to test with (stable/beta/nightly) - set -e # fail on any error set -u # treat unset variables as error @@ -27,9 +25,6 @@ then exit 0 fi -rustup default $1 - -git submodule update --init --recursive rustup show exec ./test.sh diff --git a/scripts/snap/snapcraft.template.yaml b/scripts/snap/snapcraft.template.yaml index eb67ba128..d170241db 100644 --- a/scripts/snap/snapcraft.template.yaml +++ b/scripts/snap/snapcraft.template.yaml @@ -50,8 +50,4 @@ parts: cp -v ethkey $SNAPCRAFT_PART_INSTALL/usr/bin/ethkey cp -v ethstore $SNAPCRAFT_PART_INSTALL/usr/bin/ethstore cp -v whisper $SNAPCRAFT_PART_INSTALL/usr/bin/whisper - stage-packages: [libc6, libudev1, libstdc++6, cmake, libdb] - df: - plugin: nil - stage-packages: [coreutils] - stage: [bin/df] + stage-packages: [libc6, libudev1, libstdc++6, cmake, libdb5.3] diff --git a/test.sh b/test.sh index e2b5dcf7c..7c132aa00 100755 --- a/test.sh +++ b/test.sh @@ -1,33 +1,12 @@ #!/bin/sh # Running Parity Full Test Suite +echo "________Running test.sh________" FEATURES="json-tests,ci-skip-issue" OPTIONS="--release" VALIDATE=1 THREADS=8 -case $1 in - --no-json) - FEATURES="ipc" - shift # past argument=value - ;; - --no-release) - OPTIONS="" - shift - ;; - --no-validate) - VALIDATE=0 - shift - ;; - --no-run) - OPTIONS="--no-run" - shift - ;; - *) - # unknown option - ;; -esac - set -e @@ -57,7 +36,6 @@ cpp_test () { cd build && \ cmake .. && \ make -j $THREADS && \ - ./parity-example && \ cd .. && \ rm -rf build && \ cd ../.. 
@@ -98,4 +76,3 @@ then else cargo_test $@ fi - diff --git a/util/network-devp2p/Cargo.toml b/util/network-devp2p/Cargo.toml index 74a4cc945..3a8cde91e 100644 --- a/util/network-devp2p/Cargo.toml +++ b/util/network-devp2p/Cargo.toml @@ -35,6 +35,7 @@ serde = "1.0" serde_json = "1.0" serde_derive = "1.0" error-chain = { version = "0.12", default-features = false } +lru-cache = "0.1" [dev-dependencies] tempdir = "0.3" diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs index fd5af2764..d82ec294f 100644 --- a/util/network-devp2p/src/discovery.rs +++ b/util/network-devp2p/src/discovery.rs @@ -20,6 +20,7 @@ use std::collections::{HashSet, HashMap, VecDeque}; use std::collections::hash_map::Entry; use std::default::Default; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use lru_cache::LruCache; use hash::keccak; use ethereum_types::{H256, H520}; use rlp::{Rlp, RlpStream}; @@ -55,6 +56,8 @@ const REQUEST_BACKOFF: [Duration; 4] = [ const NODE_LAST_SEEN_TIMEOUT: Duration = Duration::from_secs(24*60*60); +const OBSERVED_NODES_MAX_SIZE: usize = 10_000; + #[derive(Clone, Debug)] pub struct NodeEntry { pub id: NodeId, @@ -95,7 +98,27 @@ struct FindNodeRequest { #[derive(Clone, Copy)] enum PingReason { Default, - FromDiscoveryRequest(NodeId) + FromDiscoveryRequest(NodeId, NodeValidity), +} + +#[derive(Clone, Copy, PartialEq)] +enum NodeCategory { + Bucket, + Observed +} + +#[derive(Clone, Copy, PartialEq)] +enum NodeValidity { + Ourselves, + ValidNode(NodeCategory), + ExpiredNode(NodeCategory), + UnknownNode +} + +#[derive(Debug)] +enum BucketError { + Ourselves, + NotInTheBucket{node_entry: NodeEntry, bucket_distance: usize}, } struct PingRequest { @@ -145,6 +168,12 @@ pub struct Discovery<'a> { discovery_id: NodeId, discovery_nodes: HashSet, node_buckets: Vec, + + // Sometimes we don't want to add nodes to the NodeTable, but still want to + // keep track of them to avoid excessive pinging (happens when an unknown node sends + // a discovery request to us -- the node might be on a different net). 
+ other_observed_nodes: LruCache, + in_flight_pings: HashMap, in_flight_find_nodes: HashMap, send_queue: VecDeque, @@ -171,6 +200,7 @@ impl<'a> Discovery<'a> { discovery_id: NodeId::new(), discovery_nodes: HashSet::new(), node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(), + other_observed_nodes: LruCache::new(OBSERVED_NODES_MAX_SIZE), in_flight_pings: HashMap::new(), in_flight_find_nodes: HashMap::new(), send_queue: VecDeque::new(), @@ -200,41 +230,53 @@ impl<'a> Discovery<'a> { } } - fn update_node(&mut self, e: NodeEntry) -> Option { - trace!(target: "discovery", "Inserting {:?}", &e); + fn update_bucket_record(&mut self, e: NodeEntry) -> Result<(), BucketError> { let id_hash = keccak(e.id); let dist = match Discovery::distance(&self.id_hash, &id_hash) { Some(dist) => dist, None => { debug!(target: "discovery", "Attempted to update own entry: {:?}", e); - return None; + return Err(BucketError::Ourselves); } }; + let bucket = &mut self.node_buckets[dist]; + bucket.nodes.iter_mut().find(|n| n.address.id == e.id) + .map_or(Err(BucketError::NotInTheBucket{node_entry: e.clone(), bucket_distance: dist}.into()), |entry| { + entry.address = e; + entry.last_seen = Instant::now(); + entry.backoff_until = Instant::now(); + entry.fail_count = 0; + Ok(()) + }) + } - let mut added_map = HashMap::new(); - let ping = { - let bucket = &mut self.node_buckets[dist]; - let updated = if let Some(node) = bucket.nodes.iter_mut().find(|n| n.address.id == e.id) { - node.address = e.clone(); - node.last_seen = Instant::now(); - node.backoff_until = Instant::now(); - node.fail_count = 0; - true - } else { false }; + fn update_node(&mut self, e: NodeEntry) -> Option { + trace!(target: "discovery", "Inserting {:?}", &e); - if !updated { - added_map.insert(e.id, e.clone()); - bucket.nodes.push_front(BucketEntry::new(e)); + match self.update_bucket_record(e) { + Ok(()) => None, + Err(BucketError::Ourselves) => None, + Err(BucketError::NotInTheBucket{node_entry, bucket_distance}) => Some((node_entry, bucket_distance)) + }.map(|(node_entry, bucket_distance)| { + trace!(target: "discovery", "Adding a new node {:?} into our bucket {}", &node_entry, bucket_distance); + let mut added = HashMap::with_capacity(1); + added.insert(node_entry.id, node_entry.clone()); + + let node_to_ping = { + let bucket = &mut self.node_buckets[bucket_distance]; + bucket.nodes.push_front(BucketEntry::new(node_entry)); if bucket.nodes.len() > BUCKET_SIZE { select_bucket_ping(bucket.nodes.iter()) - } else { None } - } else { None } - }; - if let Some(node) = ping { - self.try_ping(node, PingReason::Default); - } - Some(TableUpdates { added: added_map, removed: HashSet::new() }) + } else { + None + } + }; + if let Some(node) = node_to_ping { + self.try_ping(node, PingReason::Default); + }; + TableUpdates{added, removed: HashSet::new()} + }) } /// Starts the discovery process at round 0 @@ -542,10 +584,28 @@ impl<'a> Discovery<'a> { }; if let Some((node, ping_reason)) = expected_node { - if let PingReason::FromDiscoveryRequest(target) = ping_reason { + if let PingReason::FromDiscoveryRequest(target, validity) = ping_reason { self.respond_with_discovery(target, &node)?; + // kirushik: I would prefer to probe the network id of the remote node here, and add it to the nodes list if it's on "our" net -- + // but `on_packet` happens synchronously, so doing the full TCP handshake ceremony here is a bad idea. 
+ // So instead we just LRU-caching most recently seen nodes to avoid unnecessary pinging + match validity { + NodeValidity::ValidNode(NodeCategory::Bucket) | NodeValidity::ExpiredNode(NodeCategory::Bucket) => { + trace!(target: "discovery", "Updating node {:?} in our Kad buckets", &node); + self.update_bucket_record(node).unwrap_or_else(|error| { + debug!(target: "discovery", "Error occured when processing ping from a bucket node: {:?}", &error); + }); + }, + NodeValidity::UnknownNode | NodeValidity::ExpiredNode(NodeCategory::Observed) | NodeValidity::ValidNode(NodeCategory::Observed)=> { + trace!(target: "discovery", "Updating node {:?} in the list of other_observed_nodes", &node); + self.other_observed_nodes.insert(node.id, (node.endpoint, Instant::now())); + }, + NodeValidity::Ourselves => (), + } + Ok(None) + } else { + Ok(self.update_node(node)) } - Ok(self.update_node(node)) } else { debug!(target: "discovery", "Got unexpected Pong from {:?} ; request not found", &from); Ok(None) @@ -566,31 +626,41 @@ impl<'a> Discovery<'a> { } }; - if self.is_a_valid_known_node(&node) { - self.respond_with_discovery(target, &node)?; - } else { + match self.check_validity(&node) { + NodeValidity::Ourselves => (), // It makes no sense to respond to the discovery request from ourselves + NodeValidity::ValidNode(_) => self.respond_with_discovery(target, &node)?, // Make sure the request source is actually there and responds to pings before actually responding - self.try_ping(node, PingReason::FromDiscoveryRequest(target)); + invalidity_reason => self.try_ping(node, PingReason::FromDiscoveryRequest(target, invalidity_reason)) } Ok(None) } - fn is_a_valid_known_node(&self, node: &NodeEntry) -> bool { + fn check_validity(&mut self, node: &NodeEntry) -> NodeValidity { let id_hash = keccak(node.id); let dist = match Discovery::distance(&self.id_hash, &id_hash) { Some(dist) => dist, None => { debug!(target: "discovery", "Got an incoming discovery request from self: {:?}", node); - return false; + return NodeValidity::Ourselves; } }; let bucket = &self.node_buckets[dist]; if let Some(known_node) = bucket.nodes.iter().find(|n| n.address.id == node.id) { debug!(target: "discovery", "Found a known node in a bucket when processing discovery: {:?}/{:?}", known_node, node); - (known_node.address.endpoint == node.endpoint) && (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT) + match ((known_node.address.endpoint == node.endpoint), (known_node.last_seen.elapsed() < NODE_LAST_SEEN_TIMEOUT)) { + (true, true) => NodeValidity::ValidNode(NodeCategory::Bucket), + (true, false) => NodeValidity::ExpiredNode(NodeCategory::Bucket), + _ => NodeValidity::UnknownNode + } } else { - false + self.other_observed_nodes.get_mut(&node.id).map_or(NodeValidity::UnknownNode, |(endpoint, observed_at)| { + match ((node.endpoint==*endpoint), (observed_at.elapsed() < NODE_LAST_SEEN_TIMEOUT)) { + (true, true) => NodeValidity::ValidNode(NodeCategory::Observed), + (true, false) => NodeValidity::ExpiredNode(NodeCategory::Observed), + _ => NodeValidity::UnknownNode + } + }) } } diff --git a/util/network-devp2p/src/lib.rs b/util/network-devp2p/src/lib.rs index 68d70552d..1141c9545 100644 --- a/util/network-devp2p/src/lib.rs +++ b/util/network-devp2p/src/lib.rs @@ -85,6 +85,7 @@ extern crate keccak_hash as hash; extern crate serde; extern crate serde_json; extern crate parity_snappy as snappy; +extern crate lru_cache; #[macro_use] extern crate error_chain; diff --git a/util/version/Cargo.toml b/util/version/Cargo.toml index 
f548ddb0a..aaf27e6c3 100644 --- a/util/version/Cargo.toml +++ b/util/version/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "parity-version" # NOTE: this value is used for Parity Ethereum version string (via env CARGO_PKG_VERSION) -version = "2.2.9" +version = "2.2.10" authors = ["Parity Technologies <admin@parity.io>"] build = "build.rs"
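
For reference, the `PostSign` change above lets `personal_sendTransaction` run its dispatch step inside the signing flow, so the reserved nonce is only marked used after the post-sign action succeeds, while `personal_signTransaction` passes `()` to skip the action. Below is a minimal, self-contained sketch of that pattern; it is not the crate's real API (no `WithToken`, no futures, no nonce reservation, and `String` stands in for the error type).

```rust
// Self-contained model of the PostSign pattern (simplified: synchronous
// Result instead of IntoFuture, no WithToken, String as the error type).

type Error = String;

#[derive(Debug)]
struct SignedTransaction(u64); // placeholder for the real transaction type

/// Action to run on a freshly signed transaction, e.g. dispatching it.
trait PostSign {
    type Item;
    fn execute(self, signed: SignedTransaction) -> Result<Self::Item, Error>;
}

/// `()` means "no post-sign action": the signed transaction is the result.
impl PostSign for () {
    type Item = SignedTransaction;
    fn execute(self, signed: SignedTransaction) -> Result<Self::Item, Error> {
        Ok(signed)
    }
}

/// Any one-shot closure over the signed transaction is a post-sign action.
impl<T, F> PostSign for F
where
    F: FnOnce(SignedTransaction) -> Result<T, Error>,
{
    type Item = T;
    fn execute(self, signed: SignedTransaction) -> Result<Self::Item, Error> {
        (self)(signed)
    }
}

/// Stand-in for `Dispatcher::sign`: sign, then hand off to the action.
fn sign<P: PostSign>(nonce: u64, post_sign: P) -> Result<P::Item, Error> {
    let signed = SignedTransaction(nonce); // pretend this involved a keystore
    post_sign.execute(signed)
}

fn main() -> Result<(), Error> {
    // personal_signTransaction-style call: no action, returns the signed tx.
    let tx = sign(1, ())?;
    println!("signed only: {:?}", tx);

    // personal_sendTransaction-style call: "dispatch" inside the signing
    // flow; only after this succeeds would the reserved nonce be marked used.
    let hash = sign(2, |tx: SignedTransaction| -> Result<String, Error> {
        Ok(format!("0x{:x}", tx.0)) // fake dispatch returning a fake hash
    })?;
    println!("dispatched: {}", hash);
    Ok(())
}
```

The blanket impl over `FnOnce` is what allows the closures in the diff (in `execute` and `send_transaction`) to be passed directly as the `post_sign` argument.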
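Similarly, the discovery change keeps nodes that are not in the Kademlia buckets in a bounded LRU map and re-validates entries by endpoint and age instead of re-pinging. A minimal sketch of that bookkeeping follows, with `u64` and `String` as stand-ins for `NodeId` and `NodeEndpoint`; the cache calls (`new`, `insert`, `get_mut`) match the `lru-cache = "0.1"` crate added in the diff.

```rust
// Minimal model of the other_observed_nodes bookkeeping (placeholder types:
// u64 for NodeId, String for NodeEndpoint). Cargo.toml: lru-cache = "0.1"

extern crate lru_cache;

use lru_cache::LruCache;
use std::time::{Duration, Instant};

const OBSERVED_NODES_MAX_SIZE: usize = 10_000;
const NODE_LAST_SEEN_TIMEOUT: Duration = Duration::from_secs(24 * 60 * 60);

fn main() {
    // Bounded map: once full, an insert evicts the least recently used
    // entry, so unsolicited discovery traffic cannot grow state forever.
    let mut observed: LruCache<u64, (String, Instant)> =
        LruCache::new(OBSERVED_NODES_MAX_SIZE);

    // After a successful pong from an off-bucket node, remember it.
    observed.insert(42, ("203.0.113.7:30303".to_string(), Instant::now()));

    // On the next discovery request from that id, treat it as valid only if
    // the endpoint still matches and the entry has not aged out.
    let (node_id, endpoint) = (42u64, "203.0.113.7:30303");
    let valid = observed.get_mut(&node_id).map_or(false, |(seen, at)| {
        seen.as_str() == endpoint && at.elapsed() < NODE_LAST_SEEN_TIMEOUT
    });
    println!("valid observed node: {}", valid);
}
```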