From 1879dbca8aa1c71a2335abaf685954a92d13306f Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Wed, 22 Mar 2017 16:39:40 +0100 Subject: [PATCH 01/11] Consistent store naming in the Signer components (#4996) --- .../components/RequestPending/requestPending.js | 8 ++++---- .../Signer/components/SignRequest/signRequest.js | 10 +++++----- .../components/SignRequest/signRequest.spec.js | 2 +- .../TransactionPending/transactionPending.js | 14 +++++++------- .../views/Signer/containers/Embedded/embedded.js | 2 +- .../Signer/containers/RequestsPage/requestsPage.js | 2 +- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/js/src/views/Signer/components/RequestPending/requestPending.js b/js/src/views/Signer/components/RequestPending/requestPending.js index 671df6cbb..3d8ea0c15 100644 --- a/js/src/views/Signer/components/RequestPending/requestPending.js +++ b/js/src/views/Signer/components/RequestPending/requestPending.js @@ -36,7 +36,7 @@ export default class RequestPending extends Component { PropTypes.shape({ sign: PropTypes.object.isRequired }), PropTypes.shape({ signTransaction: PropTypes.object.isRequired }) ]).isRequired, - signerstore: PropTypes.object.isRequired + signerStore: PropTypes.object.isRequired }; static defaultProps = { @@ -45,7 +45,7 @@ export default class RequestPending extends Component { }; render () { - const { className, date, focus, gasLimit, id, isSending, netVersion, onReject, payload, signerstore, origin } = this.props; + const { className, date, focus, gasLimit, id, isSending, netVersion, onReject, payload, signerStore, origin } = this.props; if (payload.sign) { const { sign } = payload; @@ -63,7 +63,7 @@ export default class RequestPending extends Component { onConfirm={ this.onConfirm } onReject={ onReject } origin={ origin } - signerstore={ signerstore } + signerStore={ signerStore } /> ); } @@ -83,7 +83,7 @@ export default class RequestPending extends Component { onConfirm={ this.onConfirm } onReject={ onReject } origin={ origin } - signerstore={ signerstore } + signerStore={ signerStore } transaction={ transaction } /> ); diff --git a/js/src/views/Signer/components/SignRequest/signRequest.js b/js/src/views/Signer/components/SignRequest/signRequest.js index 373262d41..8c9984b4b 100644 --- a/js/src/views/Signer/components/SignRequest/signRequest.js +++ b/js/src/views/Signer/components/SignRequest/signRequest.js @@ -47,7 +47,7 @@ export default class SignRequest extends Component { id: PropTypes.object.isRequired, isFinished: PropTypes.bool.isRequired, netVersion: PropTypes.string.isRequired, - signerstore: PropTypes.object.isRequired, + signerStore: PropTypes.object.isRequired, className: PropTypes.string, focus: PropTypes.bool, @@ -67,9 +67,9 @@ export default class SignRequest extends Component { }; componentWillMount () { - const { address, signerstore } = this.props; + const { address, signerStore } = this.props; - signerstore.fetchBalance(address); + signerStore.fetchBalance(address); } render () { @@ -106,8 +106,8 @@ export default class SignRequest extends Component { renderDetails () { const { api } = this.context; - const { address, data, netVersion, origin, signerstore } = this.props; - const { balances, externalLink } = signerstore; + const { address, data, netVersion, origin, signerStore } = this.props; + const { balances, externalLink } = signerStore; const balance = balances[address]; diff --git a/js/src/views/Signer/components/SignRequest/signRequest.spec.js b/js/src/views/Signer/components/SignRequest/signRequest.spec.js index 
cf7d7e9f6..c5c87d509 100644 --- a/js/src/views/Signer/components/SignRequest/signRequest.spec.js +++ b/js/src/views/Signer/components/SignRequest/signRequest.spec.js @@ -28,7 +28,7 @@ const store = { describe('views/Signer/components/SignRequest', () => { it('renders', () => { expect(shallow( - , + , )).to.be.ok; }); }); diff --git a/js/src/views/Signer/components/TransactionPending/transactionPending.js b/js/src/views/Signer/components/TransactionPending/transactionPending.js index 9b0b91ef6..90ea75b22 100644 --- a/js/src/views/Signer/components/TransactionPending/transactionPending.js +++ b/js/src/views/Signer/components/TransactionPending/transactionPending.js @@ -48,7 +48,7 @@ class TransactionPending extends Component { onConfirm: PropTypes.func.isRequired, onReject: PropTypes.func.isRequired, origin: PropTypes.any, - signerstore: PropTypes.object.isRequired, + signerStore: PropTypes.object.isRequired, transaction: PropTypes.shape({ condition: PropTypes.object, data: PropTypes.string, @@ -75,10 +75,10 @@ class TransactionPending extends Component { gasPrice: this.props.transaction.gasPrice.toFixed() }); - hwstore = HardwareStore.get(this.context.api); + hardwareStore = HardwareStore.get(this.context.api); componentWillMount () { - const { signerstore, transaction } = this.props; + const { signerStore, transaction } = this.props; const { from, gas, gasPrice, to, value } = transaction; const fee = tUtil.getFee(gas, gasPrice); // BigNumber object @@ -88,7 +88,7 @@ class TransactionPending extends Component { this.setState({ gasPriceEthmDisplay, totalValue, gasToDisplay }); this.gasStore.setEthValue(value); - signerstore.fetchBalances([from, to]); + signerStore.fetchBalances([from, to]); } render () { @@ -98,13 +98,13 @@ class TransactionPending extends Component { } renderTransaction () { - const { accounts, className, focus, id, isSending, netVersion, origin, signerstore, transaction } = this.props; + const { accounts, className, focus, id, isSending, netVersion, origin, signerStore, transaction } = this.props; const { totalValue } = this.state; - const { balances, externalLink } = signerstore; + const { balances, externalLink } = signerStore; const { from, value } = transaction; const fromBalance = balances[from]; const account = accounts[from] || {}; - const disabled = account.hardware && !this.hwstore.isConnected(from); + const disabled = account.hardware && !this.hardwareStore.isConnected(from); return (
diff --git a/js/src/views/Signer/containers/Embedded/embedded.js b/js/src/views/Signer/containers/Embedded/embedded.js index adac35621..b79b4f203 100644 --- a/js/src/views/Signer/containers/Embedded/embedded.js +++ b/js/src/views/Signer/containers/Embedded/embedded.js @@ -101,7 +101,7 @@ class Embedded extends Component { onReject={ actions.startRejectRequest } origin={ origin } payload={ payload } - signerstore={ this.store } + signerStore={ this.store } /> ); } diff --git a/js/src/views/Signer/containers/RequestsPage/requestsPage.js b/js/src/views/Signer/containers/RequestsPage/requestsPage.js index c0cddb8d0..8af2e5fd5 100644 --- a/js/src/views/Signer/containers/RequestsPage/requestsPage.js +++ b/js/src/views/Signer/containers/RequestsPage/requestsPage.js @@ -141,7 +141,7 @@ class RequestsPage extends Component { onReject={ actions.startRejectRequest } origin={ origin } payload={ payload } - signerstore={ this.store } + signerStore={ this.store } /> ); } From 5df3f5d136383e8647ffe846b99ea6059e0d7cb1 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Wed, 22 Mar 2017 16:39:49 +0100 Subject: [PATCH 02/11] Show busy indicator, focus first field (#4997) --- .../modals/PasswordManager/passwordManager.js | 261 ++++++++++-------- 1 file changed, 144 insertions(+), 117 deletions(-) diff --git a/js/src/modals/PasswordManager/passwordManager.js b/js/src/modals/PasswordManager/passwordManager.js index 91ac9ebf2..6436bc4cc 100644 --- a/js/src/modals/PasswordManager/passwordManager.js +++ b/js/src/modals/PasswordManager/passwordManager.js @@ -60,8 +60,11 @@ class PasswordManager extends Component { store = new Store(this.context.api, this.props.account); render () { + const { busy } = this.store; + return ( -
-
- - } - label={ - - } - onChange={ this.onEditTestPassword } - onSubmit={ this.testPassword } - submitOnBlur={ false } - type='password' - /> -
-
+ { this.renderTabTest() } -
-
- - } - label={ - - } - onChange={ this.onEditCurrentPassword } - type='password' - /> - - } - label={ - - } - onChange={ this.onEditNewPasswordHint } - value={ passwordHint } - /> -
-
- - } - label={ - - } - onChange={ this.onEditNewPassword } - onSubmit={ this.changePassword } - submitOnBlur={ false } - type='password' - /> -
-
- - } - hint={ - - } - label={ - - } - onChange={ this.onEditNewPasswordRepeat } - onSubmit={ this.changePassword } - submitOnBlur={ false } - type='password' - /> -
-
- - -
-
+ { this.renderTabChange() }
); } + renderTabTest () { + const { actionTab, busy } = this.store; + + if (actionTab !== TEST_ACTION) { + return null; + } + + return ( +
+
+ + } + label={ + + } + onChange={ this.onEditTestPassword } + onSubmit={ this.testPassword } + submitOnBlur={ false } + type='password' + /> +
+
+ ); + } + + renderTabChange () { + const { actionTab, busy, isRepeatValid, newPassword, passwordHint } = this.store; + + if (actionTab !== CHANGE_ACTION) { + return null; + } + + return ( +
+
+ + } + label={ + + } + onChange={ this.onEditCurrentPassword } + type='password' + /> + + } + label={ + + } + onChange={ this.onEditNewPasswordHint } + value={ passwordHint } + /> +
+
+ + } + label={ + + } + onChange={ this.onEditNewPassword } + onSubmit={ this.changePassword } + submitOnBlur={ false } + type='password' + /> +
+
+ + } + hint={ + + } + label={ + + } + onChange={ this.onEditNewPasswordRepeat } + onSubmit={ this.changePassword } + submitOnBlur={ false } + type='password' + /> +
+
+ + +
+
+ ); + } + renderDialogActions () { const { actionTab, busy, isRepeatValid } = this.store; From 6b4cb351499fa274893aa19d0e929c830148723a Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Wed, 22 Mar 2017 16:39:57 +0100 Subject: [PATCH 03/11] Fix FireFox overflows (#5000) * Max width for container * Set min-width --- js/src/ui/Container/container.css | 1 + js/src/ui/SectionList/sectionList.css | 2 ++ 2 files changed, 3 insertions(+) diff --git a/js/src/ui/Container/container.css b/js/src/ui/Container/container.css index d496931b9..3d66d2655 100644 --- a/js/src/ui/Container/container.css +++ b/js/src/ui/Container/container.css @@ -24,6 +24,7 @@ $transitionAll: all 0.75s cubic-bezier(0.23, 1, 0.32, 1); flex: 1; height: 100%; padding: 0em; + max-width: 100%; position: relative; /*transform: translateZ(0); transition: $transitionAll;*/ diff --git a/js/src/ui/SectionList/sectionList.css b/js/src/ui/SectionList/sectionList.css index 4613371ad..e81d8ce07 100644 --- a/js/src/ui/SectionList/sectionList.css +++ b/js/src/ui/SectionList/sectionList.css @@ -18,6 +18,7 @@ $transition: all 0.25s; $widthNormal: 33.33%; $widthExpanded: 42%; +$widthContracted: 29%; .section { position: relative; @@ -45,6 +46,7 @@ $widthExpanded: 42%; display: flex; flex: 0 1 $widthNormal; max-width: $widthNormal; + min-width: $widthContracted; opacity: 0.85; padding: 0.25em; From 5255b72f673a1afc9c3946b08f7e2936e47eac34 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 22 Mar 2017 16:10:12 +0000 Subject: [PATCH 04/11] [ci skip] js-precompiled 20170322-160703 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 458e3cae5..3014f3a54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#9651995aa0fa718b9b9b58f1c7281900643bdf8f" +source = "git+https://github.com/ethcore/js-precompiled.git#f44837a345d170642d44f953393aeea7a7cb9eab" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 54cc076fe..716028271 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.26", + "version": "1.7.27", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 41f66f33d54bfc3ecc4e34bd4defc60aa6c7c391 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 22 Mar 2017 16:22:41 +0000 Subject: [PATCH 05/11] [ci skip] js-precompiled 20170322-161945 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3014f3a54..73f2d6c73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#f44837a345d170642d44f953393aeea7a7cb9eab" +source = "git+https://github.com/ethcore/js-precompiled.git#476a016aa2458b77f49fbfd500c4d9b03456165f" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 716028271..dcdcc9097 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.27", + "version": "1.7.28", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team 
", From e5c2b2535128342d628be65ea18cd9db9cd3ad12 Mon Sep 17 00:00:00 2001 From: Craig O'Connor Date: Thu, 23 Mar 2017 05:14:32 -0400 Subject: [PATCH 06/11] auto lint (#5003) * auto lint * Usage consistency update --- js/package.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/js/package.json b/js/package.json index dcdcc9097..f92cc7d6b 100644 --- a/js/package.json +++ b/js/package.json @@ -48,8 +48,10 @@ "lint": "npm run lint:css && npm run lint:js", "lint:cached": "npm run lint:css && npm run lint:js:cached", "lint:css": "stylelint ./src/**/*.css", + "lint:fix": "npm run lint:js:fix", "lint:js": "eslint --ignore-path .gitignore ./src/", "lint:js:cached": "eslint --cache --ignore-path .gitignore ./src/", + "lint:js:fix": "eslint --fix --ignore-path .gitignore ./src/", "test": "NODE_ENV=test mocha --compilers ejs:ejsify 'src/**/*.spec.js'", "test:coverage": "NODE_ENV=test istanbul cover _mocha -- --compilers ejs:ejsify 'src/**/*.spec.js'", "test:e2e": "NODE_ENV=test mocha 'src/**/*.e2e.js'", From b931a225ba571e29a80b8a6d4b772ed338274fe2 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Thu, 23 Mar 2017 09:36:34 +0000 Subject: [PATCH 07/11] [ci skip] js-precompiled 20170323-093322 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73f2d6c73..99ebbe7e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#476a016aa2458b77f49fbfd500c4d9b03456165f" +source = "git+https://github.com/ethcore/js-precompiled.git#a44b1cb29b80e4d3372ee47494499a61db7a8116" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index f92cc7d6b..6a1e63fb1 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.28", + "version": "1.7.29", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 64cec5ff7df6726d8cd5b83f1678975fe009f7b8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 13:17:05 +0100 Subject: [PATCH 08/11] Implement PIP messages, request builder, and handlers (#4945) * return errors on database corruption * fix tests, json tests * fix remainder of build * buffer flow -> request credits * proving state backend * generate transaction proofs from provider * network messages for transaction proof * transaction proof test * test for transaction proof message * fix call bug * request transaction proofs from on_demand * most of proved_execution rpc * proved execution future * initial request definitions * RLP encoding and decoding for requests * proofs of non-existance in ProvingBlockChainClient * new requests in provider. * encode and decode responses * complete initial request changes * handle request packet in LightProtocol * handle response packets * implement requesting from * re-do cost table * get tests compiling * fix cost table RLP encoding * roundtrip tests for request types * request builder tests * move request_builder -> request::builder * get network tests working * return only complete headers responses * request builder improvements * New version of jsonrpc. 
* split request filling into fill,complete * Better invalid encoding messages * Fixing deprecated methods of tokio_core * use PIP messages in on_demand, old API * migrate oneshot::complete to send in on_demand * get on_demand tests to compile * port ethsync to PIP messages * adjust to minor on_demand API changes in RPC * Using dedicated branch for jsonrpc * Bump --- ethcore/light/Cargo.toml | 2 +- ethcore/light/src/client/header_chain.rs | 1 - ethcore/light/src/client/mod.rs | 49 +- ethcore/light/src/lib.rs | 4 +- ethcore/light/src/net/context.rs | 35 +- ethcore/light/src/net/error.rs | 4 + ethcore/light/src/net/mod.rs | 900 ++--------- ethcore/light/src/net/request_credits.rs | 205 ++- ethcore/light/src/net/request_set.rs | 59 +- ethcore/light/src/net/tests/mod.rs | 309 ++-- ethcore/light/src/on_demand/mod.rs | 627 +++---- ethcore/light/src/on_demand/request.rs | 67 +- ethcore/light/src/provider.rs | 242 ++- ethcore/light/src/types/les_request.rs | 228 --- ethcore/light/src/types/mod.rs.in | 2 +- ethcore/light/src/types/request/builder.rs | 190 +++ ethcore/light/src/types/request/mod.rs | 1710 ++++++++++++++++++++ ethcore/src/client/client.rs | 18 +- ethcore/src/client/test_client.rs | 13 +- ethcore/src/client/traits.rs | 12 +- ethcore/src/state/account.rs | 13 +- ethcore/src/state/mod.rs | 45 +- rpc/src/v1/helpers/dispatch.rs | 5 +- rpc/src/v1/impls/light/eth.rs | 23 +- sync/src/light_sync/mod.rs | 65 +- sync/src/light_sync/response.rs | 34 +- sync/src/light_sync/sync_round.rs | 27 +- sync/src/light_sync/tests/test_net.rs | 6 +- 28 files changed, 2800 insertions(+), 2095 deletions(-) delete mode 100644 ethcore/light/src/types/les_request.rs create mode 100644 ethcore/light/src/types/request/builder.rs create mode 100644 ethcore/light/src/types/request/mod.rs diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index d8844dc3f..6f95d8a0e 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "Parity LES primitives" +description = "Parity Light Client Implementation" homepage = "http://parity.io" license = "GPL-3.0" name = "ethcore-light" diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 575938cd5..9dcd25888 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -24,7 +24,6 @@ //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. // TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers` -// use std::collections::{BTreeMap, HashMap}; diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 2872e0eec..c791caed1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{Bytes, DBValue, H256, Mutex, RwLock}; +use util::{H256, Mutex, RwLock}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -315,50 +315,3 @@ impl LightChainClient for Client { Client::cht_root(self, i) } } - -// dummy implementation, should be removed when a `TestClient` is added. 
-impl ::provider::Provider for Client { - fn chain_info(&self) -> BlockChainInfo { - Client::chain_info(self) - } - - fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { - None - } - - fn earliest_state(&self) -> Option { - None - } - - fn block_header(&self, id: BlockId) -> Option { - Client::block_header(self, id) - } - - fn block_body(&self, _id: BlockId) -> Option { - None - } - - fn block_receipts(&self, _hash: &H256) -> Option { - None - } - - fn state_proof(&self, _req: ::request::StateProof) -> Vec { - Vec::new() - } - - fn contract_code(&self, _req: ::request::ContractCode) -> Bytes { - Vec::new() - } - - fn header_proof(&self, _req: ::request::HeaderProof) -> Option<(encoded::Header, Vec)> { - None - } - - fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option> { - None - } - - fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> { - Vec::new() - } -} diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index b6e06a02b..ada58d8de 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -26,7 +26,7 @@ //! use-cases like sending transactions from a personal account. //! //! The light client performs a header-only sync, doing verification and pruning -//! historical blocks. Upon pruning, batches of 2048 blocks have a number => hash +//! historical blocks. Upon pruning, batches of 2048 blocks have a number => (hash, TD) //! mapping sealed into "canonical hash tries" which can later be used to verify //! historical block queries from peers. @@ -57,7 +57,7 @@ mod types; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; -pub use types::les_request as request; +pub use types::request as request; #[macro_use] extern crate log; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index bd0c8a6bb..9eafead57 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -12,7 +12,7 @@ // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// along with Parity. If not, see . //! I/O and event context generalizations. @@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request::{self, Request}; +use request::Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions @@ -50,13 +50,13 @@ pub trait IoContext { impl<'a> IoContext for NetworkContext<'a> { fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { if let Err(e) = self.send(peer, packet_id, packet_body) { - debug!(target: "les", "Error sending packet to peer {}: {}", peer, e); + debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e); } } fn respond(&self, packet_id: u8, packet_body: Vec) { if let Err(e) = self.respond(packet_id, packet_body) { - debug!(target: "les", "Error responding to peer message: {}", e); + debug!(target: "pip", "Error responding to peer message: {}", e); } } @@ -83,16 +83,17 @@ pub trait BasicContext { fn persistent_peer_id(&self, peer: PeerId) -> Option; /// Make a request from a peer. - fn request_from(&self, peer: PeerId, request: Request) -> Result; + /// + /// Fails on: nonexistent peer, network error, peer not server, + /// insufficient credits. Does not check capabilities before sending. 
+ /// On success, returns a request id which can later be coordinated + /// with an event. + fn request_from(&self, peer: PeerId, request: Requests) -> Result; /// Make an announcement of new capabilities to the rest of the peers. // TODO: maybe just put this on a timer in LightProtocol? fn make_announcement(&self, announcement: Announcement); - /// Find the maximum number of requests of a specific type which can be made from - /// supplied peer. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize; - /// Disconnect a peer. fn disconnect_peer(&self, peer: PeerId); @@ -123,18 +124,14 @@ impl<'a> BasicContext for TickCtx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } @@ -160,18 +157,14 @@ impl<'a> BasicContext for Ctx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index dda78e0b6..1c0374c7e 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -56,6 +56,8 @@ pub enum Error { UnknownPeer, /// Unsolicited response. UnsolicitedResponse, + /// Bad back-reference in request. + BadBackReference, /// Not a server. NotServer, /// Unsupported protocol version. @@ -78,6 +80,7 @@ impl Error { Error::WrongNetwork => Punishment::Disable, Error::UnknownPeer => Punishment::Disconnect, Error::UnsolicitedResponse => Punishment::Disable, + Error::BadBackReference => Punishment::Disable, Error::NotServer => Punishment::Disable, Error::UnsupportedProtocolVersion(_) => Punishment::Disable, Error::BadProtocolVersion => Punishment::Disable, @@ -109,6 +112,7 @@ impl fmt::Display for Error { Error::WrongNetwork => write!(f, "Wrong network"), Error::UnknownPeer => write!(f, "Unknown peer"), Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), + Error::BadBackReference => write!(f, "Bad back-reference in request."), Error::NotServer => write!(f, "Peer not a server."), Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 3830dde4b..667e07cb4 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -14,19 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! LES Protocol Version 1 implementation. +//! PIP Protocol Version 1 implementation. 
//! //! This uses a "Provider" to answer requests. -//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) -use ethcore::transaction::{Action, UnverifiedTransaction}; -use ethcore::receipt::Receipt; +use ethcore::transaction::UnverifiedTransaction; use io::TimerToken; use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, UntrustedRlp}; use util::hash::H256; -use util::{Bytes, DBValue, Mutex, RwLock, U256}; +use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; use std::collections::HashMap; @@ -35,7 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{self, HashOrNumber, Request}; +use request::{Request, Requests, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -72,8 +70,8 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; /// Max protocol version. pub const MAX_PROTOCOL_VERSION: u8 = 1; -/// Packet count for LES. -pub const PACKET_COUNT: u8 = 17; +/// Packet count for PIP. +pub const PACKET_COUNT: u8 = 5; // packet ID definitions. mod packet { @@ -83,49 +81,27 @@ mod packet { // announcement of new block hashes or capabilities. pub const ANNOUNCE: u8 = 0x01; - // request and response for block headers - pub const GET_BLOCK_HEADERS: u8 = 0x02; - pub const BLOCK_HEADERS: u8 = 0x03; - - // request and response for block bodies - pub const GET_BLOCK_BODIES: u8 = 0x04; - pub const BLOCK_BODIES: u8 = 0x05; - - // request and response for transaction receipts. - pub const GET_RECEIPTS: u8 = 0x06; - pub const RECEIPTS: u8 = 0x07; - - // request and response for merkle proofs. - pub const GET_PROOFS: u8 = 0x08; - pub const PROOFS: u8 = 0x09; - - // request and response for contract code. - pub const GET_CONTRACT_CODES: u8 = 0x0a; - pub const CONTRACT_CODES: u8 = 0x0b; + // request and response. + pub const REQUEST: u8 = 0x02; + pub const RESPONSE: u8 = 0x03; // relay transactions to peers. - pub const SEND_TRANSACTIONS: u8 = 0x0c; - - // request and response for header proofs in a CHT. - pub const GET_HEADER_PROOFS: u8 = 0x0d; - pub const HEADER_PROOFS: u8 = 0x0e; - - // request and response for transaction proof. - pub const GET_TRANSACTION_PROOF: u8 = 0x0f; - pub const TRANSACTION_PROOF: u8 = 0x10; + pub const SEND_TRANSACTIONS: u8 = 0x04; } // timeouts for different kinds of requests. all values are in milliseconds. -// TODO: variable timeouts based on request count. mod timeout { pub const HANDSHAKE: i64 = 2500; - pub const HEADERS: i64 = 5000; - pub const BODIES: i64 = 5000; - pub const RECEIPTS: i64 = 3500; - pub const PROOFS: i64 = 4000; - pub const CONTRACT_CODES: i64 = 5000; - pub const HEADER_PROOFS: i64 = 3500; - pub const TRANSACTION_PROOF: i64 = 5000; + pub const BASE: i64 = 1500; // base timeout for packet. + + // timeouts per request within packet. + pub const HEADERS: i64 = 250; // per header? + pub const BODY: i64 = 50; + pub const RECEIPT: i64 = 50; + pub const PROOF: i64 = 100; // state proof + pub const CONTRACT_CODE: i64 = 100; + pub const HEADER_PROOF: i64 = 100; + pub const TRANSACTION_PROOF: i64 = 1000; // per gas? } /// A request id. @@ -158,27 +134,7 @@ pub struct Peer { failed_requests: Vec, } -impl Peer { - // check the maximum cost of a request, returning an error if there's - // not enough credits left. - // returns the calculated maximum cost. 
- fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result { - flow_params.recharge(&mut self.local_credits); - - let max_cost = flow_params.compute_cost(kind, max); - self.local_credits.deduct_cost(max_cost)?; - Ok(max_cost) - } - - // refund credits for a request. returns new amount of credits. - fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { - flow_params.refund(&mut self.local_credits, amount); - - self.local_credits.current() - } -} - -/// An LES event handler. +/// A light protocol event handler. /// /// Each handler function takes a context which describes the relevant peer /// and gives references to the IO layer and protocol structure so new messages @@ -197,20 +153,10 @@ pub trait Handler: Send + Sync { fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } /// Called when a peer requests relay of some transactions. fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } - /// Called when a peer responds with block bodies. - fn on_block_bodies(&self, _ctx: &EventContext, _req_id: ReqId, _bodies: &[Bytes]) { } - /// Called when a peer responds with block headers. - fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, _headers: &[Bytes]) { } - /// Called when a peer responds with block receipts. - fn on_receipts(&self, _ctx: &EventContext, _req_id: ReqId, _receipts: &[Vec]) { } - /// Called when a peer responds with state proofs. Each proof should be a series of trie - /// nodes in ascending order by distance from the root. - fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec]) { } - /// Called when a peer responds with contract code. - fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { } - /// Called when a peer responds with header proofs. Each proof should be a block header coupled - /// with a series of trie nodes is ascending order by distance from the root. - fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called when a peer responds to requests. + /// Responses not guaranteed to contain valid data and are not yet checked against + /// the requests they correspond to. + fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { } /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. @@ -307,7 +253,7 @@ pub struct LightProtocol { impl LightProtocol { /// Create a new instance of the protocol manager. pub fn new(provider: Arc, params: Params) -> Self { - debug!(target: "les", "Initializing LES handler"); + debug!(target: "pip", "Initializing light protocol handler"); let genesis_hash = provider.chain_info().genesis_hash; LightProtocol { @@ -339,62 +285,43 @@ impl LightProtocol { ) } - /// Check the maximum amount of requests of a specific type - /// which a peer would be able to serve. Returns zero if the - /// peer is unknown or has no credit parameters. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.peers.read().get(&peer).and_then(|peer| { - let mut peer = peer.lock(); - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - Some(flow.max_amount(&*c, kind)) - } - None => None, - } - }).unwrap_or(0) - } - /// Make a request to a peer. 
/// /// Fails on: nonexistent peer, network error, peer not server, /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. - pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { + pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, requests: Requests) -> Result { let peers = self.peers.read(); - let peer = peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)?; - let mut peer = peer.lock(); - - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - let max = flow.compute_cost(request.kind(), request.amount()); - c.deduct_cost(max)?; - } - None => return Err(Error::NotServer), - } - - let req_id = self.req_id.fetch_add(1, Ordering::SeqCst); - let packet_data = encode_request(&request, req_id); - - trace!(target: "les", "Dispatching request {} to peer {}", req_id, peer_id); - - let packet_id = match request.kind() { - request::Kind::Headers => packet::GET_BLOCK_HEADERS, - request::Kind::Bodies => packet::GET_BLOCK_BODIES, - request::Kind::Receipts => packet::GET_RECEIPTS, - request::Kind::StateProofs => packet::GET_PROOFS, - request::Kind::Codes => packet::GET_CONTRACT_CODES, - request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, - request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF, + let peer = match peers.get(peer_id) { + Some(peer) => peer, + None => return Err(Error::UnknownPeer), }; - io.send(*peer_id, packet_id, packet_data); + let mut peer = peer.lock(); + let peer = &mut *peer; + match peer.remote_flow { + None => Err(Error::NotServer), + Some((ref mut creds, ref params)) => { + // check that enough credits are available. + let mut temp_creds: Credits = creds.clone(); + for request in requests.requests() { + temp_creds.deduct_cost(params.compute_cost(request))?; + } + *creds = temp_creds; - peer.pending_requests.insert(ReqId(req_id), request, SteadyTime::now()); + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); + io.send(*peer_id, packet::REQUEST, { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id.0).append_list(&requests.requests()); + stream.out() + }); - Ok(ReqId(req_id)) + // begin timeout. + peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + Ok(req_id) + } + } } /// Make an announcement of new chain head and capabilities to all peers. 
@@ -427,7 +354,7 @@ impl LightProtocol { None => { // both values will always originate locally -- this means something // has gone really wrong - debug!(target: "les", "couldn't compute reorganization depth between {:?} and {:?}", + debug!(target: "pip", "couldn't compute reorganization depth between {:?} and {:?}", &announcement.head_hash, &peer_info.sent_head); 0 } @@ -470,85 +397,52 @@ impl LightProtocol { // - check whether peer exists // - check whether request was made // - check whether request kinds match - fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { + fn pre_verify_response(&self, peer: &PeerId, raw: &UntrustedRlp) -> Result { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); + trace!(target: "pip", "pre-verifying response from peer {}", peer); - let mut had_req = false; let peers = self.peers.read(); - let maybe_err = match peers.get(peer) { + let res = match peers.get(peer) { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { - (Some(request), Some(flow_info)) => { - had_req = true; - + (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); - if request.kind() != kind { - Some(Error::UnsolicitedResponse) - } else { - None - } + Ok(()) } - (None, _) => Some(Error::UnsolicitedResponse), - (_, None) => Some(Error::NotServer), // really should be impossible. + (None, _) => Err(Error::UnsolicitedResponse), + (_, None) => Err(Error::NotServer), // really should be impossible. } } - None => Some(Error::UnknownPeer), // probably only occurs in a race of some kind. + None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. }; - if had_req { - let id_guard = IdGuard::new(peers, *peer, req_id); - match maybe_err { - Some(err) => Err(err), - None => Ok(id_guard) - } - } else { - Err(maybe_err.expect("every branch without a request leads to error; qed")) - } + res.map(|_| IdGuard::new(peers, *peer, req_id)) } - /// Handle an LES packet using the given io context. + /// Handle a packet using the given io context. /// Packet data is _untrusted_, which means that invalid data won't lead to /// issues. 
pub fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) { let rlp = UntrustedRlp::new(data); - trace!(target: "les", "Incoming packet {} from peer {}", packet_id, peer); + trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); // handle the packet let res = match packet_id { packet::STATUS => self.status(peer, io, rlp), packet::ANNOUNCE => self.announcement(peer, io, rlp), - packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), - packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), - - packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), - packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), - - packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), - packet::RECEIPTS => self.receipts(peer, io, rlp), - - packet::GET_PROOFS => self.get_proofs(peer, io, rlp), - packet::PROOFS => self.proofs(peer, io, rlp), - - packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), - packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), - - packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), - packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), - - packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp), - packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp), + packet::REQUEST => self.request(peer, io, rlp), + packet::RESPONSE => self.response(peer, io, rlp), packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), @@ -577,7 +471,7 @@ impl LightProtocol { .collect(); for slowpoke in slowpokes { - debug!(target: "les", "Peer {} handshake timed out", slowpoke); + debug!(target: "pip", "Peer {} handshake timed out", slowpoke); pending.remove(&slowpoke); io.disconnect_peer(slowpoke); } @@ -587,7 +481,7 @@ impl LightProtocol { { for (peer_id, peer) in self.peers.read().iter() { if peer.lock().pending_requests.check_timeout(now) { - debug!(target: "les", "Peer {} request timeout", peer_id); + debug!(target: "pip", "Peer {} request timeout", peer_id); io.disconnect_peer(*peer_id); } } @@ -631,7 +525,7 @@ impl LightProtocol { /// called when a peer disconnects. pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { - trace!(target: "les", "Peer {} disconnecting", peer); + trace!(target: "pip", "Peer {} disconnecting", peer); self.pending_peers.write().remove(&peer); let unfulfilled = match self.peers.write().remove(&peer) { @@ -686,7 +580,7 @@ impl LightProtocol { let (status, capabilities, flow_params) = status::parse_handshake(data)?; - trace!(target: "les", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); + trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { return Err(Error::WrongNetwork); @@ -723,7 +617,7 @@ impl LightProtocol { // Handle an announcement. fn announcement(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { if !self.peers.read().contains_key(peer) { - debug!(target: "les", "Ignoring announcement from unknown peer"); + debug!(target: "pip", "Ignoring announcement from unknown peer"); return Ok(()) } @@ -765,513 +659,80 @@ impl LightProtocol { Ok(()) } - // Handle a request for block headers. - fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_HEADERS: usize = 512; + // Receive requests from a peer. 
+ fn request(&self, peer_id: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + // the maximum amount of requests we'll fill in a single packet. + const MAX_REQUESTS: usize = 256; + + use ::request::RequestBuilder; + use ::request::CompleteRequest; let peers = self.peers.read(); - let peer = match peers.get(peer) { + let peer = match peers.get(peer_id) { Some(peer) => peer, None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - let data = data.at(1)?; - - let start_block = { - if data.at(0)?.size() == 32 { - HashOrNumber::Hash(data.val_at(0)?) - } else { - HashOrNumber::Number(data.val_at(0)?) - } - }; - - let req = request::Headers { - start: start_block, - max: ::std::cmp::min(MAX_HEADERS, data.val_at(1)?), - skip: data.val_at(2)?, - reverse: data.val_at(3)?, - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Headers, req.max)?; - - let response = self.provider.block_headers(req); - let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - io.respond(packet::BLOCK_HEADERS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for header in response { - stream.append_raw(&header.into_inner(), 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block headers. - fn block_headers(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Headers, &raw)?; - let raw_headers: Vec<_> = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_headers(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_headers); - } - - Ok(()) - } - - // Handle a request for block bodies. - fn get_block_bodies(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_BODIES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Bodies { - block_hashes: data.at(1)?.iter() - .take(MAX_BODIES) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Bodies, req.block_hashes.len())?; - - let response = self.provider.block_bodies(req); - let response_len = response.iter().filter(|x| x.is_some()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::BLOCK_BODIES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for body in response { - match body { - Some(body) => stream.append_raw(&body.into_inner(), 1), - None => stream.append_empty_data(), - }; - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block bodies. 
- fn block_bodies(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Bodies, &raw)?; - let raw_bodies: Vec = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_bodies(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_bodies); - } - - Ok(()) - } - - // Handle a request for receipts. - fn get_receipts(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_RECEIPTS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Receipts { - block_hashes: data.at(1)?.iter() - .take(MAX_RECEIPTS) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Receipts, req.block_hashes.len())?; - - let response = self.provider.receipts(req); - let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::RECEIPTS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for receipts in response { - stream.append_raw(&receipts, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for receipts. - fn receipts(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Receipts, &raw)?; - let raw_receipts: Vec> = raw.at(2)? - .iter() - .map(|x| x.as_list()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_receipts(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_receipts); - } - - Ok(()) - } - - // Handle a request for proofs. - fn get_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 128; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::StateProof { - block: x.val_at(0)?, - key1: x.val_at(1)?, - key2: if x.at(2)?.is_empty() { None } else { Some(x.val_at(2)?) }, - from_level: x.val_at(3)?, - }) - }).collect(); - - request::StateProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::StateProofs, req.requests.len())?; - - let response = self.provider.proofs(req); - let response_len = response.iter().filter(|x| &x[..] 
!= &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for proofs. - fn proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::StateProofs, &raw)?; - - let raw_proofs: Vec> = raw.at(2)?.iter() - .map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect()) - .collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_state_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Handle a request for contract code. - fn get_contract_code(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_CODES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_CODES).map(|x| { - Ok(request::ContractCode { - block_hash: x.val_at(0)?, - account_key: x.val_at(1)?, - }) - }).collect(); - - request::ContractCodes { - code_requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Codes, req.code_requests.len())?; - - let response = self.provider.contract_codes(req); - let response_len = response.iter().filter(|x| !x.is_empty()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::CONTRACT_CODES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for code in response { - stream.append(&code); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for contract code. 
- fn contract_code(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Codes, &raw)?; - - let raw_code: Vec = raw.at(2)?.iter() - .map(|x| x.as_val()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_code(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_code); - } - - Ok(()) - } - - // Handle a request for header proofs - fn get_header_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::HeaderProof { - cht_number: x.val_at(0)?, - block_number: x.val_at(1)?, - from_level: x.val_at(2)?, - }) - }).collect(); - - request::HeaderProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::HeaderProofs, req.requests.len())?; - - let response = self.provider.header_proofs(req); - let response_len = response.iter().filter(|x| &x[..] != ::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::HEADER_PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for header proofs - fn header_proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - fn decode_res(raw: UntrustedRlp) -> Result<(Bytes, Vec), ::rlp::DecoderError> { - Ok(( - raw.val_at(0)?, - raw.at(1)?.iter().map(|x| x.as_raw().to_owned()).collect(), - )) - } - - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proofs: Vec<_> = raw.at(2)?.iter() - .map(decode_res) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_header_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Receive a request for proof-of-execution. - fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - // refuse to execute more than this amount of gas at once. - // this is appx. the point at which the proof of execution would no longer fit in - // a single Devp2p packet. 
- const MAX_GAS: usize = 50_000_000; - use util::Uint; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); + debug!(target: "pip", "Ignoring request from unknown peer"); return Ok(()) } }; let mut peer = peer.lock(); let req_id: u64 = raw.val_at(0)?; + let mut request_builder = RequestBuilder::default(); - let req = { - let req_rlp = raw.at(1)?; - request::TransactionProof { - at: req_rlp.val_at(0)?, - from: req_rlp.val_at(1)?, - action: if req_rlp.at(2)?.is_empty() { - Action::Create - } else { - Action::Call(req_rlp.val_at(2)?) - }, - gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()), - gas_price: req_rlp.val_at(4)?, - value: req_rlp.val_at(5)?, - data: req_rlp.val_at(6)?, + trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); + + // deserialize requests, check costs and request validity. + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; + for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { + let request: Request = request_rlp.as_val()?; + peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; + request_builder.push(request).map_err(|_| Error::BadBackReference)?; + } + + let requests = request_builder.build(); + let num_requests = requests.requests().len(); + trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id); + + // respond to all requests until one fails. + let responses = requests.respond_to_all(|complete_req| { + match complete_req { + CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), + CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), + CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), + CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts), + CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account), + CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage), + CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), + CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution), } - }; - - // always charge the peer for all the gas. - peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?; - - let response = match self.provider.transaction_proof(req) { - Some(res) => res, - None => vec![], - }; - - let cur_credits = peer.local_credits.current(); - - io.respond(packet::TRANSACTION_PROOF, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for state_item in response { - stream.append(&&state_item[..]); - } - - stream.out() }); + trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + + io.respond(packet::RESPONSE, { + let mut stream = RlpStream::new_list(3); + let cur_credits = peer.local_credits.current(); + stream.append(&req_id).append(&cur_credits).append_list(&responses); + stream.out() + }); Ok(()) } - // Receive a response for proof-of-execution. 
- fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proof: Vec = raw.at(2)?.iter() - .map(|rlp| { - let mut db_val = DBValue::new(); - db_val.append_slice(rlp.data()?); - Ok(db_val) - }) - .collect::, ::rlp::DecoderError>>()?; + // handle a packet with responses. + fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let (req_id, responses) = { + let id_guard = self.pre_verify_response(peer, &raw)?; + let responses: Vec = raw.list_at(2)?; + (id_guard.defuse(), responses) + }; - let req_id = id_guard.defuse(); for handler in &self.handlers { - handler.on_transaction_proof(&Ctx { - peer: *peer, + handler.on_responses(&Ctx { io: io, proto: self, - }, req_id, &raw_proof); + peer: *peer, + }, req_id, &responses); } Ok(()) @@ -1286,7 +747,7 @@ impl LightProtocol { .map(|x| x.as_val::()) .collect::>()?; - debug!(target: "les", "Received {} transactions to relay from peer {}", txs.len(), peer); + debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer); for handler in &self.handlers { handler.on_transactions(&Ctx { @@ -1305,11 +766,11 @@ fn punish(peer: PeerId, io: &IoContext, e: Error) { match e.punishment() { Punishment::None => {} Punishment::Disconnect => { - debug!(target: "les", "Disconnecting peer {}: {}", peer, e); + debug!(target: "pip", "Disconnecting peer {}: {}", peer, e); io.disconnect_peer(peer) } Punishment::Disable => { - debug!(target: "les", "Disabling peer {}: {}", peer, e); + debug!(target: "pip", "Disabling peer {}: {}", peer, e); io.disable_peer(peer) } } @@ -1339,112 +800,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), - _ => warn!(target: "les", "received timeout on unknown token {}", timer), - } - } -} - -// Helper for encoding the request to RLP with the given ID. 
-fn encode_request(req: &Request, req_id: usize) -> Vec { - match *req { - Request::Headers(ref headers) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(4); - - match headers.start { - HashOrNumber::Hash(ref hash) => stream.append(hash), - HashOrNumber::Number(ref num) => stream.append(num), - }; - - stream - .append(&headers.max) - .append(&headers.skip) - .append(&headers.reverse); - - stream.out() - } - Request::Bodies(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::Receipts(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::StateProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(4) - .append(&proof_req.block) - .append(&proof_req.key1); - - match proof_req.key2 { - Some(ref key2) => stream.append(key2), - None => stream.append_empty_data(), - }; - - stream.append(&proof_req.from_level); - } - - stream.out() - } - Request::Codes(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.code_requests.len()); - - for code_req in &request.code_requests { - stream.begin_list(2) - .append(&code_req.block_hash) - .append(&code_req.account_key); - } - - stream.out() - } - Request::HeaderProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(3) - .append(&proof_req.cht_number) - .append(&proof_req.block_number) - .append(&proof_req.from_level); - } - - stream.out() - } - Request::TransactionProof(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(7) - .append(&request.at) - .append(&request.from); - - match request.action { - Action::Create => stream.append_empty_data(), - Action::Call(ref to) => stream.append(to), - }; - - stream - .append(&request.gas) - .append(&request.gas_price) - .append(&request.value) - .append(&request.data); - - stream.out() + _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index f35c6662f..e505b293c 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -26,18 +26,13 @@ //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. -use request; -use super::packet; +use request::{self, Request}; use super::error::Error; use rlp::*; use util::U256; use time::{Duration, SteadyTime}; -/// A request cost specification. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Cost(pub U256, pub U256); - /// Credits value. /// /// Produced and recharged using `FlowParams`. @@ -81,90 +76,95 @@ impl Credits { /// A cost table, mapping requests to base and per-request costs. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - headers: Cost, // cost per header - bodies: Cost, - receipts: Cost, - state_proofs: Cost, - contract_codes: Cost, - header_proofs: Cost, - transaction_proof: Cost, // cost per gas. + base: U256, // cost per packet. + headers: U256, // cost per header + body: U256, + receipts: U256, + account: U256, + storage: U256, + code: U256, + header_proof: U256, + transaction_proof: U256, // cost per gas. } impl Default for CostTable { fn default() -> Self { // arbitrarily chosen constants. CostTable { - headers: Cost(100000.into(), 10000.into()), - bodies: Cost(150000.into(), 15000.into()), - receipts: Cost(50000.into(), 5000.into()), - state_proofs: Cost(250000.into(), 25000.into()), - contract_codes: Cost(200000.into(), 20000.into()), - header_proofs: Cost(150000.into(), 15000.into()), - transaction_proof: Cost(100000.into(), 2.into()), + base: 100000.into(), + headers: 10000.into(), + body: 15000.into(), + receipts: 5000.into(), + account: 25000.into(), + storage: 25000.into(), + code: 20000.into(), + header_proof: 15000.into(), + transaction_proof: 2.into(), } } } impl Encodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - fn append_cost(s: &mut RlpStream, msg_id: u8, cost: &Cost) { - s.begin_list(3) - .append(&msg_id) - .append(&cost.0) - .append(&cost.1); + fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&kind, s); + s.append(cost); } - s.begin_list(7); - - append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); - append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); - append_cost(s, packet::GET_RECEIPTS, &self.receipts); - append_cost(s, packet::GET_PROOFS, &self.state_proofs); - append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); - append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); - append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof); + s.begin_list(9).append(&self.base); + append_cost(s, &self.headers, request::Kind::Headers); + append_cost(s, &self.body, request::Kind::Body); + append_cost(s, &self.receipts, request::Kind::Receipts); + append_cost(s, &self.account, request::Kind::Account); + append_cost(s, &self.storage, request::Kind::Storage); + append_cost(s, &self.code, request::Kind::Code); + append_cost(s, &self.header_proof, request::Kind::HeaderProof); + append_cost(s, &self.transaction_proof, request::Kind::Execution); } } impl Decodable for CostTable { fn decode(rlp: &UntrustedRlp) -> Result { + let base = rlp.val_at(0)?; + let mut headers = None; - let mut bodies = None; + let mut body = None; let mut receipts = None; - let mut state_proofs = None; - let mut contract_codes = None; - let mut header_proofs = None; + let mut account = None; + let mut storage = None; + let mut code = None; + let mut header_proof = None; let mut transaction_proof = None; - for row in rlp.iter() { - let msg_id: u8 = row.val_at(0)?; - let cost = { - let base = row.val_at(1)?; - let per = row.val_at(2)?; - - Cost(base, per) - }; - - match msg_id { - packet::GET_BLOCK_HEADERS => headers = Some(cost), - packet::GET_BLOCK_BODIES => bodies = Some(cost), - packet::GET_RECEIPTS => receipts = Some(cost), - packet::GET_PROOFS => state_proofs = Some(cost), - packet::GET_CONTRACT_CODES => contract_codes = Some(cost), - packet::GET_HEADER_PROOFS => header_proofs = Some(cost), - packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost), - _ => return 
Err(DecoderError::Custom("Unrecognized message in cost table")), + for cost_list in rlp.iter().skip(1) { + let cost = cost_list.val_at(1)?; + match cost_list.val_at(0)? { + request::Kind::Headers => headers = Some(cost), + request::Kind::Body => body = Some(cost), + request::Kind::Receipts => receipts = Some(cost), + request::Kind::Account => account = Some(cost), + request::Kind::Storage => storage = Some(cost), + request::Kind::Code => code = Some(cost), + request::Kind::HeaderProof => header_proof = Some(cost), + request::Kind::Execution => transaction_proof = Some(cost), } } + let unwrap_cost = |cost: Option| cost.ok_or(DecoderError::Custom("Not all costs specified in cost table.")); + Ok(CostTable { - headers: headers.ok_or(DecoderError::Custom("No headers cost specified"))?, - bodies: bodies.ok_or(DecoderError::Custom("No bodies cost specified"))?, - receipts: receipts.ok_or(DecoderError::Custom("No receipts cost specified"))?, - state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?, - contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?, - header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?, - transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?, + base: base, + headers: unwrap_cost(headers)?, + body: unwrap_cost(body)?, + receipts: unwrap_cost(receipts)?, + account: unwrap_cost(account)?, + storage: unwrap_cost(storage)?, + code: unwrap_cost(code)?, + header_proof: unwrap_cost(header_proof)?, + transaction_proof: unwrap_cost(transaction_proof)?, }) } } @@ -190,17 +190,19 @@ impl FlowParams { /// Create effectively infinite flow params. pub fn free() -> Self { - let free_cost = Cost(0.into(), 0.into()); + let free_cost: U256 = 0.into(); FlowParams { limit: (!0u64).into(), recharge: 1.into(), costs: CostTable { + base: free_cost.clone(), headers: free_cost.clone(), - bodies: free_cost.clone(), + body: free_cost.clone(), receipts: free_cost.clone(), - state_proofs: free_cost.clone(), - contract_codes: free_cost.clone(), - header_proofs: free_cost.clone(), + account: free_cost.clone(), + storage: free_cost.clone(), + code: free_cost.clone(), + header_proof: free_cost.clone(), transaction_proof: free_cost, } } @@ -212,61 +214,34 @@ impl FlowParams { /// Get a reference to the cost table. pub fn cost_table(&self) -> &CostTable { &self.costs } + /// Get the base cost of a request. + pub fn base_cost(&self) -> U256 { self.costs.base } + /// Get a reference to the recharge rate. pub fn recharge_rate(&self) -> &U256 { &self.recharge } /// Compute the actual cost of a request, given the kind of request /// and number of requests made. - pub fn compute_cost(&self, kind: request::Kind, amount: usize) -> U256 { - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let amount: U256 = amount.into(); - cost.0 + (amount * cost.1) - } - - /// Compute the maximum number of costs of a specific kind which can be made - /// with the given amount of credits - /// Saturates at `usize::max()`. 
This is not a problem in practice because - /// this amount of requests is already prohibitively large. - pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize { - use util::Uint; - use std::usize; - - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let start = credits.current(); - - if start <= cost.0 { - return 0; - } else if cost.1 == U256::zero() { - return usize::MAX; - } - - let max = (start - cost.0) / cost.1; - if max >= usize::MAX.into() { - usize::MAX - } else { - max.as_u64() as usize + pub fn compute_cost(&self, request: &Request) -> U256 { + match *request { + Request::Headers(ref req) => self.costs.headers * req.max.into(), + Request::HeaderProof(_) => self.costs.header_proof, + Request::Body(_) => self.costs.body, + Request::Receipts(_) => self.costs.receipts, + Request::Account(_) => self.costs.account, + Request::Storage(_) => self.costs.storage, + Request::Code(_) => self.costs.code, + Request::Execution(ref req) => self.costs.transaction_proof * req.gas, } } - /// Create initial credits.. + /// Compute the cost of a set of requests. + /// This is the base cost plus the cost of each individual request. + pub fn compute_cost_multi(&self, requests: &[Request]) -> U256 { + requests.iter().fold(self.costs.base, |cost, req| cost + self.compute_cost(req)) + } + + /// Create initial credits. pub fn create_credits(&self) -> Credits { Credits { estimate: self.limit, diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index e6d4068da..a2391ef6f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -24,7 +24,8 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; -use request::{self, Request}; +use request::Request; +use request::Requests; use net::{timeout, ReqId}; use time::{Duration, SteadyTime}; @@ -35,7 +36,7 @@ pub struct RequestSet { counter: u64, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { @@ -50,8 +51,8 @@ impl Default for RequestSet { } impl RequestSet { - /// Push a request onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Request, now: SteadyTime) { + /// Push requests onto the stack. + pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { let counter = self.counter; self.ids.insert(req_id, counter); self.reqs.insert(counter, req); @@ -63,8 +64,8 @@ impl RequestSet { self.counter += 1; } - /// Remove a request from the stack. - pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { + /// Remove a set of requests from the stack. 
+ pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { let id = match self.ids.remove(&req_id) { Some(id) => id, None => return None, @@ -89,22 +90,10 @@ impl RequestSet { None => return false, }; - let kind = self.reqs.values() - .next() - .map(|r| r.kind()) - .expect("base time implies `reqs` non-empty; qed"); + let first_req = self.reqs.values().next() + .expect("base existing implies `reqs` non-empty; qed"); - let kind_timeout = match kind { - request::Kind::Headers => timeout::HEADERS, - request::Kind::Bodies => timeout::BODIES, - request::Kind::Receipts => timeout::RECEIPTS, - request::Kind::StateProofs => timeout::PROOFS, - request::Kind::Codes => timeout::CONTRACT_CODES, - request::Kind::HeaderProofs => timeout::HEADER_PROOFS, - request::Kind::TransactionProof => timeout::TRANSACTION_PROOF, - }; - - base + Duration::milliseconds(kind_timeout) <= now + base + compute_timeout(&first_req) <= now } /// Collect all pending request ids. @@ -121,25 +110,43 @@ impl RequestSet { pub fn is_empty(&self) -> bool { self.len() == 0 } } +// helper to calculate timeout for a specific set of requests. +// it's a base amount + some amount per request. +fn compute_timeout(reqs: &Requests) -> Duration { + Duration::milliseconds(reqs.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + } + })) +} + #[cfg(test)] mod tests { - use net::{timeout, ReqId}; - use request::{Request, Receipts}; + use net::ReqId; + use request::RequestBuilder; use time::{SteadyTime, Duration}; - use super::RequestSet; + use super::{RequestSet, compute_timeout}; #[test] fn multi_timeout() { let test_begin = SteadyTime::now(); let mut req_set = RequestSet::default(); - let the_req = Request::Receipts(Receipts { block_hashes: Vec::new() }); + let the_req = RequestBuilder::default().build(); + let req_time = compute_timeout(&the_req); req_set.insert(ReqId(0), the_req.clone(), test_begin); req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); - let test_end = test_begin + Duration::milliseconds(timeout::RECEIPTS); + let test_end = test_begin + req_time; assert!(req_set.check_timeout(test_end)); req_set.remove(&ReqId(0), test_begin + Duration::seconds(1)).unwrap(); diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 6a9de1467..e2081534c 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -27,15 +27,31 @@ use network::{PeerId, NodeId}; use net::request_credits::FlowParams; use net::context::IoContext; use net::status::{Capabilities, Status, write_handshake}; -use net::{encode_request, LightProtocol, Params, packet, Peer}; +use net::{LightProtocol, Params, packet, Peer}; use provider::Provider; -use request::{self, Request, Headers}; +use request; +use request::*; use rlp::*; -use util::{Address, Bytes, DBValue, H256, U256}; +use util::{Address, H256, U256}; use std::sync::Arc; +// helper for encoding a single request into a packet. +// panics on bad backreference. 
+fn encode_single(request: Request) -> Requests { + let mut builder = RequestBuilder::default(); + builder.push(request).unwrap(); + builder.build() +} + +// helper for making a packet out of `Requests`. +fn make_packet(req_id: usize, requests: &Requests) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).append_list(&requests.requests()); + stream.out() +} + // expected result from a call. #[derive(Debug, PartialEq, Eq)] enum Expect { @@ -99,35 +115,45 @@ impl Provider for TestProvider { self.0.client.block_header(id) } - fn block_body(&self, id: BlockId) -> Option { - self.0.client.block_body(id) + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + self.0.client.block_body(req) } - fn block_receipts(&self, hash: &H256) -> Option { - self.0.client.block_receipts(&hash) + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + self.0.client.block_receipts(req) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(_) => vec![::util::sha3::SHA3_NULL_RLP.to_vec()], - None => { - // sort of a leaf node - let mut stream = RlpStream::new_list(2); - stream.append(&req.key1).append_empty_data(); - vec![stream.out()] - } - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + // sort of a leaf node + let mut stream = RlpStream::new_list(2); + stream.append(&req.address_hash).append_empty_data(); + Some(AccountResponse { + proof: vec![stream.out()], + balance: 10.into(), + nonce: 100.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - req.account_key.iter().chain(req.account_key.iter()).cloned().collect() + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + Some(StorageResponse { + proof: vec![::rlp::encode(&req.key_hash).to_vec()], + value: req.key_hash | req.address_hash, + }) } - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + Some(CodeResponse { + code: req.block_hash.iter().chain(req.code_hash.iter()).cloned().collect(), + }) + } + + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { None } @@ -226,14 +252,15 @@ fn credit_overflow() { } // 1000 requests is far too many for the default flow params. 
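
// A minimal illustrative sketch (not code from this patch): the new cost
// model in plain numbers. `u64` stands in for `U256` and `SketchRequest` for
// `request::Request`; the constants are the arbitrary defaults from the
// `CostTable` above. A packet costs a flat base fee plus one fee per request,
// with header requests scaled by the number of headers asked for and
// execution requests scaled by the requested gas -- which is why the
// `credit_overflow` test's single request for 1000 headers is expected to be
// punished under the default parameters.

const BASE: u64 = 100_000;          // cost per packet
const PER_HEADER: u64 = 10_000;     // cost per header
const PER_BODY: u64 = 15_000;
const PER_RECEIPTS: u64 = 5_000;
const PER_ACCOUNT: u64 = 25_000;
const PER_STORAGE: u64 = 25_000;
const PER_CODE: u64 = 20_000;
const PER_HEADER_PROOF: u64 = 15_000;
const PER_GAS: u64 = 2;             // execution cost per unit of gas

enum SketchRequest {
    Headers { max: u64 },
    Body,
    Receipts,
    Account,
    Storage,
    Code,
    HeaderProof,
    Execution { gas: u64 },
}

// equivalent of `FlowParams::compute_cost` for a single request.
fn compute_cost(req: &SketchRequest) -> u64 {
    match *req {
        SketchRequest::Headers { max } => PER_HEADER * max,
        SketchRequest::Body => PER_BODY,
        SketchRequest::Receipts => PER_RECEIPTS,
        SketchRequest::Account => PER_ACCOUNT,
        SketchRequest::Storage => PER_STORAGE,
        SketchRequest::Code => PER_CODE,
        SketchRequest::HeaderProof => PER_HEADER_PROOF,
        SketchRequest::Execution { gas } => PER_GAS * gas,
    }
}

// equivalent of `compute_cost_multi`: base cost plus the cost of each request.
fn compute_cost_multi(requests: &[SketchRequest]) -> u64 {
    requests.iter().fold(BASE, |cost, req| cost + compute_cost(req))
}

fn main() {
    // one packet holding a 10-header request and a body request:
    let requests = [SketchRequest::Headers { max: 10 }, SketchRequest::Body];
    assert_eq!(compute_cost_multi(&requests), 100_000 + 10 * 10_000 + 15_000);
}
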
- let request = encode_request(&Request::Headers(Headers { - start: 1.into(), + let requests = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 1000, skip: 0, reverse: false, - }), 111); + })); + let request = make_packet(111, &requests); - proto.handle_packet(&Expect::Punish(1), &1, packet::GET_BLOCK_HEADERS, &request); + proto.handle_packet(&Expect::Punish(1), &1, packet::REQUEST, &request); } // test the basic request types -- these just make sure that requests are parsed @@ -259,33 +286,36 @@ fn get_block_headers() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = Headers { - start: 1.into(), + let request = Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 10, skip: 0, reverse: false, - }; + }); + let req_id = 111; - let request_body = encode_request(&Request::Headers(request.clone()), req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); - let mut response_stream = RlpStream::new_list(3); + let response = vec![Response::Headers(HeadersResponse { + headers: headers, + })]; - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for header in headers { - response_stream.append_raw(&header.into_inner(), 1); - } + let mut stream = RlpStream::new_list(3); + stream.append(&req_id).append(&new_creds).append_list(&response); - response_stream.out() + stream.out() }; - let expected = Expect::Respond(packet::BLOCK_HEADERS, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_HEADERS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -308,33 +338,32 @@ fn get_block_bodies() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = request::Bodies { - block_hashes: (0..10).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).collect() - }; + let mut builder = RequestBuilder::default(); + let mut bodies = Vec::new(); + for i in 0..10 { + let hash = provider.client.block_header(BlockId::Number(i)).unwrap().hash(); + builder.push(Request::Body(IncompleteBodyRequest { + hash: hash.into(), + })).unwrap(); + bodies.push(Response::Body(provider.client.block_body(CompleteBodyRequest { + hash: hash, + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Bodies(request.clone()), req_id); let response = { - let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect(); - assert_eq!(bodies.len(), 10); - - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for body in bodies { - response_stream.append_raw(&body.into_inner(), 1); - } - + 
response_stream.append(&req_id).append(&new_creds).append_list(&bodies); response_stream.out() }; - let expected = Expect::Respond(packet::BLOCK_BODIES, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_BODIES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -359,36 +388,37 @@ fn get_block_receipts() { // find the first 10 block hashes starting with `f` because receipts are only provided // by the test client in that case. - let block_hashes: Vec<_> = (0..1000).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).filter(|hash| format!("{}", hash).starts_with("f")).take(10).collect(); + let block_hashes: Vec = (0..1000) + .map(|i| provider.client.block_header(BlockId::Number(i)).unwrap().hash()) + .filter(|hash| format!("{}", hash).starts_with("f")) + .take(10) + .collect(); - let request = request::Receipts { - block_hashes: block_hashes.clone(), - }; + let mut builder = RequestBuilder::default(); + let mut receipts = Vec::new(); + for hash in block_hashes.iter().cloned() { + builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap(); + receipts.push(Response::Receipts(provider.client.block_receipts(CompleteReceiptsRequest { + hash: hash + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Receipts(request.clone()), req_id); let response = { - let receipts: Vec<_> = block_hashes.iter() - .map(|hash| provider.client.block_receipts(hash).unwrap()) - .collect(); + assert_eq!(receipts.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len()); - for block_receipts in receipts { - response_stream.append_raw(&block_receipts, 1); - } - + response_stream.append(&req_id).append(&new_creds).append_list(&receipts); response_stream.out() }; - let expected = Expect::Respond(packet::RECEIPTS, response); - proto.handle_packet(&expected, &1, packet::GET_RECEIPTS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -397,8 +427,9 @@ fn get_state_proofs() { let capabilities = capabilities(); let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let provider = TestProvider(provider); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.0.client.chain_info()); { let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); @@ -407,40 +438,45 @@ fn get_state_proofs() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::StateProofs (request::StateProofs { - requests: vec![ - request::StateProof { block: H256::default(), key1: key1, key2: None, from_level: 0 }, - request::StateProof { block: H256::default(), key1: key1, key2: Some(key2), from_level: 0}, - ] - }); + let mut builder = RequestBuilder::default(); + builder.push(Request::Account(IncompleteAccountRequest { + 
block_hash: H256::default().into(), + address_hash: key1.into(), + })).unwrap(); + builder.push(Request::Storage(IncompleteStorageRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + key_hash: key2.into(), + })).unwrap(); - let request_body = encode_request(&request, req_id); + let requests = builder.build(); + + let request_body = make_packet(req_id, &requests); let response = { - let proofs = vec![ - { let mut stream = RlpStream::new_list(2); stream.append(&key1).append_empty_data(); vec![stream.out()] }, - vec![::util::sha3::SHA3_NULL_RLP.to_vec()], + let responses = vec![ + Response::Account(provider.account_proof(CompleteAccountRequest { + block_hash: H256::default(), + address_hash: key1, + }).unwrap()), + Response::Storage(provider.storage_proof(CompleteStorageRequest { + block_hash: H256::default(), + address_hash: key1, + key_hash: key2, + }).unwrap()), ]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for proof in proofs { - response_stream.begin_list(proof.len()); - for node in proof { - response_stream.append_raw(&node, 1); - } - } - + response_stream.append(&req_id).append(&new_creds).append_list(&responses); response_stream.out() }; - let expected = Expect::Respond(packet::PROOFS, response); - proto.handle_packet(&expected, &1, packet::GET_PROOFS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -459,37 +495,31 @@ fn get_contract_code() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::Codes (request::ContractCodes { - code_requests: vec![ - request::ContractCode { block_hash: H256::default(), account_key: key1 }, - request::ContractCode { block_hash: H256::default(), account_key: key2 }, - ], + let request = Request::Code(IncompleteCodeRequest { + block_hash: key1.into(), + code_hash: key2.into(), }); - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); let response = { - let codes: Vec> = vec![ - key1.iter().chain(key1.iter()).cloned().collect(), - key2.iter().chain(key2.iter()).cloned().collect(), - ]; + let response = vec![Response::Code(CodeResponse { + code: key1.iter().chain(key2.iter()).cloned().collect(), + })]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for code in codes { - response_stream.append(&code); - } - + response_stream.append(&req_id).append(&new_creds).append_list(&response); response_stream.out() }; - let expected = Expect::Respond(packet::CONTRACT_CODES, response); - proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -508,8 +538,8 @@ fn 
proof_of_execution() { } let req_id = 112; - let mut request = Request::TransactionProof (request::TransactionProof { - at: H256::default(), + let mut request = Request::Execution(request::IncompleteExecutionRequest { + block_hash: H256::default().into(), from: Address::default(), action: Action::Call(Address::default()), gas: 100.into(), @@ -519,9 +549,11 @@ fn proof_of_execution() { }); // first: a valid amount to request execution of. - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).begin_list(0); @@ -529,17 +561,19 @@ fn proof_of_execution() { response_stream.out() }; - let expected = Expect::Respond(packet::TRANSACTION_PROOF, response); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); // next: way too much requested gas. - if let Request::TransactionProof(ref mut req) = request { + if let Request::Execution(ref mut req) = request { req.gas = 100_000_000.into(); } let req_id = 113; - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let expected = Expect::Punish(1); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -554,12 +588,13 @@ fn id_guard() { let req_id_1 = ReqId(5143); let req_id_2 = ReqId(1111); - let req = Request::Headers(request::Headers { - start: 5u64.into(), + + let req = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(5u64).into(), max: 100, skip: 0, reverse: false, - }); + })); let peer_id = 9876; @@ -579,15 +614,15 @@ fn id_guard() { failed_requests: Vec::new(), })); - // first, supply wrong request type. + // first, malformed responses. { let mut stream = RlpStream::new_list(3); stream.append(&req_id_1.0); stream.append(&4_000_000usize); - stream.begin_list(0); + stream.begin_list(2).append(&125usize).append(&3usize); let packet = stream.out(); - assert!(proto.block_bodies(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // next, do an unexpected response. @@ -598,7 +633,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.receipts(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // lastly, do a valid (but empty) response. 
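
// A minimal illustrative sketch (not code from this patch): the bookkeeping
// that the `id_guard` test above exercises, reduced to std types and using
// the same request ids. Every outgoing packet is tracked under a `ReqId`; an
// incoming RESPONSE packet is only accepted if it names an id that is still
// pending, and the entry is consumed once a valid (possibly empty) response
// arrives. `PendingTable` and `SketchError` are stand-in names, not the
// crate's real API, which also updates peer credits and tracks failed
// requests.

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ReqId(usize);

#[derive(Debug)]
enum SketchError {
    UnsolicitedResponse,
}

struct PendingTable {
    pending: HashSet<ReqId>,
}

impl PendingTable {
    fn new() -> PendingTable {
        PendingTable { pending: HashSet::new() }
    }

    fn insert(&mut self, id: ReqId) {
        self.pending.insert(id);
    }

    fn accept_response(&mut self, id: ReqId) -> Result<(), SketchError> {
        if self.pending.remove(&id) {
            Ok(())
        } else {
            Err(SketchError::UnsolicitedResponse)
        }
    }
}

fn main() {
    let mut table = PendingTable::new();
    table.insert(ReqId(5143));

    // a response naming an id that was never sent is rejected outright.
    assert!(table.accept_response(ReqId(1111)).is_err());

    // a correctly addressed response -- even an empty one -- is accepted,
    // and the entry is consumed so the same id cannot be answered twice.
    assert!(table.accept_response(ReqId(5143)).is_ok());
    assert!(table.accept_response(ReqId(5143)).is_err());
}
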
@@ -609,7 +644,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.block_headers(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); } let peers = proto.peers.read(); diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 488535568..8d451c88e 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -34,12 +34,12 @@ use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::RlpStream; -use util::{Bytes, DBValue, RwLock, Mutex, U256}; +use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; -use types::les_request::{self as les_request, Request as LesRequest}; +use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; pub mod request; @@ -49,24 +49,85 @@ struct Peer { capabilities: Capabilities, } +impl Peer { + // Whether a given peer can handle a specific request. + fn can_handle(&self, pending: &Pending) -> bool { + match *pending { + Pending::HeaderProof(ref req, _) => + self.capabilities.serve_headers && self.status.head_num > req.num(), + Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, + Pending::Block(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::BlockReceipts(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + Pending::Account(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::Code(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + Pending::TxProof(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + } + } +} + // Which portions of a CHT proof should be sent. enum ChtProofSender { - Both(Sender<(encoded::Header, U256)>), - Header(Sender), + Both(Sender<(H256, U256)>), + Hash(Sender), ChainScore(Sender), } // Attempted request info and sender to put received value. enum Pending { - HeaderByNumber(request::HeaderByNumber, ChtProofSender), + HeaderProof(request::HeaderProof, ChtProofSender), HeaderByHash(request::HeaderByHash, Sender), Block(request::Body, Sender), BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender), + Account(request::Account, Sender>), Code(request::Code, Sender), TxProof(request::TransactionProof, Sender>), } +impl Pending { + // Create a network request. 
+ fn make_request(&self) -> NetworkRequest { + match *self { + Pending::HeaderByHash(ref req, _) => NetworkRequest::Headers(basic_request::IncompleteHeadersRequest { + start: basic_request::HashOrNumber::Hash(req.0).into(), + skip: 0, + max: 1, + reverse: false, + }), + Pending::HeaderProof(ref req, _) => NetworkRequest::HeaderProof(basic_request::IncompleteHeaderProofRequest { + num: req.num().into(), + }), + Pending::Block(ref req, _) => NetworkRequest::Body(basic_request::IncompleteBodyRequest { + hash: req.hash.into(), + }), + Pending::BlockReceipts(ref req, _) => NetworkRequest::Receipts(basic_request::IncompleteReceiptsRequest { + hash: req.0.hash().into(), + }), + Pending::Account(ref req, _) => NetworkRequest::Account(basic_request::IncompleteAccountRequest { + block_hash: req.header.hash().into(), + address_hash: ::util::Hashable::sha3(&req.address).into(), + }), + Pending::Code(ref req, _) => NetworkRequest::Code(basic_request::IncompleteCodeRequest { + block_hash: req.block_id.0.into(), + code_hash: req.code_hash.into(), + }), + Pending::TxProof(ref req, _) => NetworkRequest::Execution(basic_request::IncompleteExecutionRequest { + block_hash: req.header.hash().into(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }), + } + } +} + /// On demand request service. See module docs for more details. /// Accumulates info about all peers' capabilities and dispatches /// requests to them accordingly. @@ -90,25 +151,25 @@ impl OnDemand { } } - /// Request a header by block number and CHT root hash. - /// Returns the header. - pub fn header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + /// Request a header's hash by block number and CHT root hash. + /// Returns the hash. + pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); - cache.block_hash(&req.num()).and_then(|hash| cache.block_header(&hash)) + cache.block_hash(&req.num()) }; match cached { - Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::Header(sender)), + Some(hash) => sender.send(hash).expect(RECEIVER_IN_SCOPE), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), } receiver } /// Request a canonical block's chain score. /// Returns the chain score. - pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); @@ -117,71 +178,33 @@ impl OnDemand { match cached { Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::ChainScore(sender)), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), } receiver } - /// Request a canonical block's chain score. - /// Returns the header and chain score. - pub fn header_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver<(encoded::Header, U256)> { + /// Request a canonical block's hash and chain score by number. + /// Returns the hash and chain score. 
+ pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); let hash = cache.block_hash(&req.num()); ( - hash.clone().and_then(|hash| cache.block_header(&hash)), + hash.clone(), hash.and_then(|hash| cache.chain_score(&hash)), ) }; match cached { - (Some(hdr), Some(score)) => sender.send((hdr, score)).expect(RECEIVER_IN_SCOPE), - _ => self.dispatch_header_by_number(ctx, req, ChtProofSender::Both(sender)), + (Some(hash), Some(score)) => sender.send((hash, score)).expect(RECEIVER_IN_SCOPE), + _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), } receiver } - // dispatch the request, completing the request if no peers available. - fn dispatch_header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber, sender: ChtProofSender) { - let num = req.num(); - let cht_num = req.cht_num(); - - let les_req = LesRequest::HeaderProofs(les_request::HeaderProofs { - requests: vec![les_request::HeaderProof { - cht_number: cht_num, - block_number: num, - from_level: 0, - }], - }); - - let pending = Pending::HeaderByNumber(req, sender); - - // we're looking for a peer with serveHeaders who's far enough along in the - // chain. - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_headers && peer.status.head_num >= num { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - }, - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a header by hash. This is less accurate than by-number because we don't know /// where in the chain this header lies, and therefore can't find a peer who is supposed to have /// it as easily. @@ -189,50 +212,11 @@ impl OnDemand { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), - None => self.dispatch_header_by_hash(ctx, req, sender), + None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } receiver } - fn dispatch_header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash, sender: Sender) { - let les_req = LesRequest::Headers(les_request::Headers { - start: req.0.into(), - max: 1, - skip: 0, - reverse: false, - }); - - // all we've got is a hash, so we'll just guess at peers who might have - // it randomly. - let mut potential_peers = self.peers.read().iter() - .filter(|&(_, peer)| peer.capabilities.serve_headers) - .map(|(id, _)| *id) - .collect::>(); - - let mut rng = ::rand::thread_rng(); - ::rand::Rng::shuffle(&mut rng, &mut potential_peers); - - let pending = Pending::HeaderByHash(req, sender); - - for id in potential_peers { - match ctx.request_from(id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a block, given its header. 
Block bodies are requestable by hash only, /// and the header is required anyway to verify and complete the block body /// -- this just doesn't obscure the network query. @@ -246,7 +230,7 @@ impl OnDemand { stream.begin_list(0); stream.begin_list(0); - sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE) + sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } else { match self.cache.lock().block_body(&req.hash) { Some(body) => { @@ -254,43 +238,14 @@ impl OnDemand { stream.append_raw(&req.header.into_inner(), 1); stream.append_raw(&body.into_inner(), 2); - sender.complete(encoded::Block::new(stream.out())); + sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } - None => self.dispatch_block(ctx, req, sender), + None => self.dispatch(ctx, Pending::Block(req, sender)), } } receiver } - fn dispatch_block(&self, ctx: &BasicContext, req: request::Body, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::Bodies(les_request::Bodies { - block_hashes: vec![req.hash], - }); - let pending = Pending::Block(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request the receipts for a block. The header serves two purposes: /// provide the block hash to fetch receipts for, and for verification of the receipts root. pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { @@ -298,88 +253,25 @@ impl OnDemand { // fast path for empty receipts. if req.0.receipts_root() == SHA3_NULL_RLP { - sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) + sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); } else { match self.cache.lock().block_receipts(&req.0.hash()) { Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), - None => self.dispatch_block_receipts(ctx, req, sender), + None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } } receiver } - fn dispatch_block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts, sender: Sender>) { - let num = req.0.number(); - let les_req = LesRequest::Receipts(les_request::Receipts { - block_hashes: vec![req.0.hash()], - }); - let pending = Pending::BlockReceipts(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request an account by address and block header -- which gives a hash to query and a state root /// to verify against. 
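
// A minimal illustrative sketch (not code from this patch): the consolidation
// this on-demand rewrite is built around, with stand-in types (`PeerInfo`,
// `NetRequest`, `PendingSketch`). Instead of one dispatch_* function per
// request flavour, every pending request becomes a variant of a single enum
// that knows (a) whether a given peer can serve it and (b) how to turn itself
// into a network request; one generic dispatch loop then covers all flavours
// and parks the request as "orphaned" when no suitable peer is available,
// mirroring `Peer::can_handle` and `Pending::make_request` above and the
// single `dispatch` function later in this diff.

struct PeerInfo { serve_headers: bool, serve_state: bool }

#[derive(Clone)]
enum NetRequest { Headers { number: u64 }, Account { block: u64 } }

enum PendingSketch {
    HeaderByNumber { number: u64 },
    Account { block: u64 },
}

impl PendingSketch {
    // which peers are able to serve this request flavour.
    fn can_handle(&self, peer: &PeerInfo) -> bool {
        match *self {
            PendingSketch::HeaderByNumber { .. } => peer.serve_headers,
            PendingSketch::Account { .. } => peer.serve_state,
        }
    }

    // build the outgoing network request for this pending entry.
    fn make_request(&self) -> NetRequest {
        match *self {
            PendingSketch::HeaderByNumber { number } => NetRequest::Headers { number: number },
            PendingSketch::Account { block } => NetRequest::Account { block: block },
        }
    }
}

// one dispatch loop for every request flavour.
fn dispatch<S>(
    peers: &[(u64, PeerInfo)],
    pending: PendingSketch,
    mut request_from: S,
    orphaned: &mut Vec<PendingSketch>,
) where S: FnMut(u64, NetRequest) -> Result<(), ()> {
    let request = pending.make_request();
    for &(peer_id, ref peer) in peers {
        if !pending.can_handle(peer) { continue }
        if request_from(peer_id, request.clone()).is_ok() {
            return; // assigned; the real code records the pending entry by request id.
        }
    }
    orphaned.push(pending);
}

fn main() {
    let peers = vec![(1u64, PeerInfo { serve_headers: true, serve_state: false })];
    let mut orphaned = Vec::new();

    // a header request finds a peer; a state request is parked as orphaned.
    dispatch(&peers, PendingSketch::HeaderByNumber { number: 100 }, |_, _| Ok(()), &mut orphaned);
    dispatch(&peers, PendingSketch::Account { block: 100 }, |_, _| Ok(()), &mut orphaned);
    assert_eq!(orphaned.len(), 1);
}
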
- pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_account(ctx, req, sender); + self.dispatch(ctx, Pending::Account(req, sender)); receiver } - fn dispatch_account(&self, ctx: &BasicContext, req: request::Account, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::StateProofs(les_request::StateProofs { - requests: vec![les_request::StateProof { - block: req.header.hash(), - key1: ::util::Hashable::sha3(&req.address), - key2: None, - from_level: 0, - }], - }); - let pending = Pending::Account(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request code by address, known code hash, and block header. pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { let (sender, receiver) = oneshot::channel(); @@ -388,88 +280,50 @@ impl OnDemand { if req.code_hash == ::util::sha3::SHA3_EMPTY { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { - self.dispatch_code(ctx, req, sender); + self.dispatch(ctx, Pending::Code(req, sender)); } receiver } - fn dispatch_code(&self, ctx: &BasicContext, req: request::Code, sender: Sender) { - let num = req.block_id.1; - let les_req = LesRequest::Codes(les_request::ContractCodes { - code_requests: vec![les_request::ContractCode { - block_hash: req.block_id.0, - account_key: ::util::Hashable::sha3(&req.address), - }] - }); - let pending = Pending::Code(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request proof-of-execution for a transaction. 
pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_transaction_proof(ctx, req, sender); + self.dispatch(ctx, Pending::TxProof(req, sender)); receiver } - fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender>) { - let num = req.header.number(); - let les_req = LesRequest::TransactionProof(les_request::TransactionProof { - at: req.header.hash(), - from: req.tx.sender(), - gas: req.tx.gas, - gas_price: req.tx.gas_price, - action: req.tx.action.clone(), - value: req.tx.value, - data: req.tx.data.clone(), - }); - let pending = Pending::TxProof(req, sender); + // dispatch the request, with a "suitability" function to filter acceptable peers. + fn dispatch(&self, ctx: &BasicContext, pending: Pending) { + let mut builder = basic_request::RequestBuilder::default(); + builder.push(pending.make_request()) + .expect("make_request always returns fully complete request; qed"); + + let complete = builder.build(); - // we're looking for a peer with serveStateSince(num) for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), + if !peer.can_handle(&pending) { continue } + match ctx.request_from(*id, complete.clone()) { + Ok(req_id) => { + trace!(target: "on_demand", "Assigning request to peer {}", id); + self.pending_requests.write().insert( + req_id, + pending, + ); + return } + Err(e) => + trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } } trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) + self.orphaned_requests.write().push(pending); } + // dispatch orphaned requests, and discard those for which the corresponding // receiver has been dropped. 
fn dispatch_orphaned(&self, ctx: &BasicContext) { @@ -499,30 +353,22 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); - for orphaned in to_dispatch { - match orphaned { - Pending::HeaderByNumber(req, mut sender) => { - let hangup = match sender { + for mut orphaned in to_dispatch { + let hung_up = match orphaned { + Pending::HeaderProof(_, ref mut sender) => match *sender { ChtProofSender::Both(ref mut s) => check_hangup(s), - ChtProofSender::Header(ref mut s) => check_hangup(s), + ChtProofSender::Hash(ref mut s) => check_hangup(s), ChtProofSender::ChainScore(ref mut s) => check_hangup(s), - }; + }, + Pending::HeaderByHash(_, ref mut sender) => check_hangup(sender), + Pending::Block(_, ref mut sender) => check_hangup(sender), + Pending::BlockReceipts(_, ref mut sender) => check_hangup(sender), + Pending::Account(_, ref mut sender) => check_hangup(sender), + Pending::Code(_, ref mut sender) => check_hangup(sender), + Pending::TxProof(_, ref mut sender) => check_hangup(sender), + }; - if !hangup { self.dispatch_header_by_number(ctx, req, sender) } - } - Pending::HeaderByHash(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_header_by_hash(ctx, req, sender) }, - Pending::Block(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block(ctx, req, sender) }, - Pending::BlockReceipts(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block_receipts(ctx, req, sender) }, - Pending::Account(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) }, - Pending::Code(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) }, - Pending::TxProof(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_transaction_proof(ctx, req, sender) } - } + if !hung_up { self.dispatch(ctx, orphaned) } } } } @@ -560,218 +406,126 @@ impl Handler for OnDemand { self.dispatch_orphaned(ctx.as_basic()); } - fn on_header_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[(Bytes, Vec)]) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { let peer = ctx.peer(); let req = match self.pending_requests.write().remove(&req_id) { Some(req) => req, None => return, }; + let response = match responses.get(0) { + Some(response) => response, + None => { + trace!(target: "on_demand", "Ignoring empty response for request {}", req_id); + self.dispatch(ctx.as_basic(), req); + return; + } + }; + + // handle the response appropriately for the request. + // all branches which do not return early lead to disabling of the peer + // due to misbehavior. 
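
// A minimal illustrative sketch (not code from this patch): the
// orphaned-request sweep performed by `dispatch_orphaned` above, with std
// types standing in for the futures oneshot channels. Here a dropped receiver
// is detected through a `Weak` handle, standing in for whatever `check_hangup`
// does with the futures sender; requests whose caller went away are
// discarded, everything else is re-dispatched.

use std::mem;
use std::sync::{Arc, Mutex, Weak};

// stand-in for a oneshot sender: the receiver owns the Arc, the pending
// request keeps only a Weak handle, so a dropped receiver is observable.
struct SketchSender<T>(Weak<Mutex<Option<T>>>);

impl<T> SketchSender<T> {
    fn hung_up(&self) -> bool {
        self.0.upgrade().is_none()
    }
}

struct Orphaned {
    sender: SketchSender<u64>,
    // ... the request data itself would live alongside the sender ...
}

fn redispatch_orphaned<D>(orphaned: &mut Vec<Orphaned>, mut dispatch: D)
where D: FnMut(Orphaned) {
    // drain the orphan list, dropping entries whose receiver went away.
    for entry in mem::replace(orphaned, Vec::new()) {
        if entry.sender.hung_up() {
            continue;
        }
        dispatch(entry);
    }
}

fn main() {
    let live = Arc::new(Mutex::new(None));
    let dead = Arc::new(Mutex::new(None));

    let mut orphaned = vec![
        Orphaned { sender: SketchSender(Arc::downgrade(&live)) },
        Orphaned { sender: SketchSender(Arc::downgrade(&dead)) },
    ];
    drop(dead); // the second caller stopped waiting for its answer.

    let mut redispatched = 0;
    redispatch_orphaned(&mut orphaned, |_| redispatched += 1);
    assert_eq!(redispatched, 1);
}
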
match req { - Pending::HeaderByNumber(req, sender) => { - if let Some(&(ref header, ref proof)) = proofs.get(0) { - match req.check_response(header, proof) { - Ok((header, score)) => { + Pending::HeaderProof(req, sender) => { + if let NetworkResponse::HeaderProof(ref response) = *response { + match req.check_response(&response.proof) { + Ok((hash, score)) => { let mut cache = self.cache.lock(); - let hash = header.hash(); - cache.insert_block_header(hash, header.clone()); - cache.insert_block_hash(header.number(), hash); + cache.insert_block_hash(req.num(), hash); cache.insert_chain_score(hash, score); match sender { - ChtProofSender::Both(sender) => sender.complete((header, score)), - ChtProofSender::Header(sender) => sender.complete(header), - ChtProofSender::ChainScore(sender) => sender.complete(score), + ChtProofSender::Both(sender) => { let _ = sender.send((hash, score)); } + ChtProofSender::Hash(sender) => { let _ = sender.send(hash); } + ChtProofSender::ChainScore(sender) => { let _ = sender.send(score); } } - return } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for header request: {:?}", e), } } - - self.dispatch_header_by_number(ctx.as_basic(), req, sender); } - _ => panic!("Only header by number request fetches header proofs; qed"), - } - } - - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::HeaderByHash(req, sender) => { - if let Some(ref header) = headers.get(0) { - match req.check_response(header) { - Ok(header) => { - self.cache.lock().insert_block_header(req.0, header.clone()); - sender.complete(header); - return - } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); + if let NetworkResponse::Headers(ref response) = *response { + if let Some(header) = response.headers.get(0) { + match req.check_response(header) { + Ok(header) => { + self.cache.lock().insert_block_header(req.0, header.clone()); + let _ = sender.send(header); + return + } + Err(e) => warn!("Error handling response for header request: {:?}", e), } } } - - self.dispatch_header_by_hash(ctx.as_basic(), req, sender); } - _ => panic!("Only header by hash request fetches headers; qed"), - } - } - - fn on_block_bodies(&self, ctx: &EventContext, req_id: ReqId, bodies: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Block(req, sender) => { - if let Some(ref body) = bodies.get(0) { - match req.check_response(body) { + if let NetworkResponse::Body(ref response) = *response { + match req.check_response(&response.body) { Ok(block) => { - let body = encoded::Body::new(body.to_vec()); - self.cache.lock().insert_block_body(req.hash, body); - sender.complete(block); + self.cache.lock().insert_block_body(req.hash, response.body.clone()); + let _ = sender.send(block); return } - Err(e) => { - warn!("Error handling response for block request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for block request: {:?}", e), } } - - self.dispatch_block(ctx.as_basic(), req, sender); } - _ => panic!("Only block request fetches bodies; qed"), - } - } - - fn on_receipts(&self, ctx: &EventContext, req_id: ReqId, receipts: &[Vec]) { - let 
peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::BlockReceipts(req, sender) => { - if let Some(ref receipts) = receipts.get(0) { - match req.check_response(receipts) { + if let NetworkResponse::Receipts(ref response) = *response { + match req.check_response(&response.receipts) { Ok(receipts) => { let hash = req.0.hash(); self.cache.lock().insert_block_receipts(hash, receipts.clone()); - sender.complete(receipts); + let _ = sender.send(receipts); return } - Err(e) => { - warn!("Error handling response for receipts request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for receipts request: {:?}", e), } } - - self.dispatch_block_receipts(ctx.as_basic(), req, sender); } - _ => panic!("Only receipts request fetches receipts; qed"), - } - } - - fn on_state_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[Vec]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Account(req, sender) => { - if let Some(ref proof) = proofs.get(0) { - match req.check_response(proof) { - Ok(proof) => { - sender.complete(proof); + if let NetworkResponse::Account(ref response) = *response { + match req.check_response(&response.proof) { + Ok(maybe_account) => { + // TODO: validate against request outputs. + // needs engine + env info as part of request. + let _ = sender.send(maybe_account); return } - Err(e) => { - warn!("Error handling response for state request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for state request: {:?}", e), } } - - self.dispatch_account(ctx.as_basic(), req, sender); } - _ => panic!("Only account request fetches state proof; qed"), - } - } - - fn on_code(&self, ctx: &EventContext, req_id: ReqId, codes: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Code(req, sender) => { - if let Some(code) = codes.get(0) { - match req.check_response(code.as_slice()) { + if let NetworkResponse::Code(ref response) = *response { + match req.check_response(response.code.as_slice()) { Ok(()) => { - sender.complete(code.clone()); + let _ = sender.send(response.code.clone()); return } - Err(e) => { - warn!("Error handling response for code request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for code request: {:?}", e), } - - self.dispatch_code(ctx.as_basic(), req, sender); } } - _ => panic!("Only code request fetches code; qed"), - } - } - - fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::TxProof(req, sender) => { - match req.check_response(items) { - ProvedExecution::Complete(executed) => { - sender.complete(Ok(executed)); - return - } - ProvedExecution::Failed(err) => { - sender.complete(Err(err)); - return - } - ProvedExecution::BadProof => { - warn!("Error handling response for transaction proof request"); - ctx.disable_peer(peer); + if let NetworkResponse::Execution(ref response) = *response { + match req.check_response(&response.items) { + ProvedExecution::Complete(executed) => { + let _ = sender.send(Ok(executed)); + return + } + 
ProvedExecution::Failed(err) => { + let _ = sender.send(Err(err)); + return + } + ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), } } - - self.dispatch_transaction_proof(ctx.as_basic(), req, sender); } - _ => panic!("Only transaction proof request dispatches transaction proof requests; qed"), } + + ctx.disable_peer(peer); } fn tick(&self, ctx: &BasicContext) { @@ -787,7 +541,7 @@ mod tests { use cache::Cache; use net::{Announcement, BasicContext, ReqId, Error as LesError}; - use request::{Request as LesRequest, Kind as LesRequestKind}; + use request::Requests; use network::{PeerId, NodeId}; use time::Duration; @@ -797,11 +551,10 @@ mod tests { impl BasicContext for FakeContext { fn persistent_peer_id(&self, _: PeerId) -> Option { None } - fn request_from(&self, _: PeerId, _: LesRequest) -> Result { + fn request_from(&self, _: PeerId, _: Requests) -> Result { unimplemented!() } fn make_announcement(&self, _: Announcement) { } - fn max_requests(&self, _: PeerId, _: LesRequestKind) -> usize { 0 } fn disconnect_peer(&self, _: PeerId) { } fn disable_peer(&self, _: PeerId) { } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 1db796982..cda1d6feb 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -61,9 +61,9 @@ impl From> for Error { } } -/// Request for a header by number. +/// Request for header proof by number #[derive(Debug, Clone, PartialEq, Eq)] -pub struct HeaderByNumber { +pub struct HeaderProof { /// The header's number. num: u64, /// The cht number for the given block number. @@ -72,11 +72,11 @@ pub struct HeaderByNumber { cht_root: H256, } -impl HeaderByNumber { +impl HeaderProof { /// Construct a new header-by-number request. Fails if the given number is 0. /// Provide the expected CHT root to compare against. pub fn new(num: u64, cht_root: H256) -> Option { - ::cht::block_to_cht_number(num).map(|cht_num| HeaderByNumber { + ::cht::block_to_cht_number(num).map(|cht_num| HeaderProof { num: num, cht_num: cht_num, cht_root: cht_root, @@ -92,18 +92,11 @@ impl HeaderByNumber { /// Access the expected CHT root. pub fn cht_root(&self) -> H256 { self.cht_root } - /// Check a response with a header and cht proof. - pub fn check_response(&self, header: &[u8], proof: &[Bytes]) -> Result<(encoded::Header, U256), Error> { - let (expected_hash, td) = match ::cht::check_proof(proof, self.num, self.cht_root) { - Some((expected_hash, td)) => (expected_hash, td), - None => return Err(Error::BadProof), - }; - - // and compare the hash to the found header. - let found_hash = header.sha3(); - match expected_hash == found_hash { - true => Ok((encoded::Header::new(header.to_vec()), td)), - false => Err(Error::WrongHash(expected_hash, found_hash)), + /// Check a response with a CHT proof, get a hash and total difficulty back. + pub fn check_response(&self, proof: &[Bytes]) -> Result<(H256, U256), Error> { + match ::cht::check_proof(proof, self.num, self.cht_root) { + Some((expected_hash, td)) => Ok((expected_hash, td)), + None => Err(Error::BadProof), } } } @@ -114,10 +107,10 @@ pub struct HeaderByHash(pub H256); impl HeaderByHash { /// Check a response for the header. 
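[Editor's sketch] `HeaderProof::new` still refuses block 0 because the genesis header is committed to no CHT, and `check_response` now returns only the proved `(hash, total_difficulty)` pair instead of a decoded header. A compile-checkable stand-in for that numbering rule, assuming the conventional 2048-block CHT size; the real logic lives in the crate's `cht` module:

/// Assumed CHT size; the real constant lives in the crate's `cht` module.
const CHT_SIZE: u64 = 2048;

/// Mirrors the rule behind `HeaderProof::new`: the genesis block belongs to no
/// CHT, every other block maps to the trie that covers its range.
fn block_to_cht_number(num: u64) -> Option<u64> {
    match num {
        0 => None,
        n => Some((n - 1) / CHT_SIZE),
    }
}

fn main() {
    assert_eq!(block_to_cht_number(0), None); // why `HeaderProof::new(0, ..)` fails
    assert_eq!(block_to_cht_number(1), Some(0));
    assert_eq!(block_to_cht_number(2_048), Some(0));
    assert_eq!(block_to_cht_number(2_049), Some(1));
    assert_eq!(block_to_cht_number(10_000), Some(4)); // as in the `check_header_proof` test
}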
- pub fn check_response(&self, header: &[u8]) -> Result { + pub fn check_response(&self, header: &encoded::Header) -> Result { let hash = header.sha3(); match hash == self.0 { - true => Ok(encoded::Header::new(header.to_vec())), + true => Ok(header.clone()), false => Err(Error::WrongHash(self.0, hash)), } } @@ -143,16 +136,14 @@ impl Body { } /// Check a response for this block body. - pub fn check_response(&self, body: &[u8]) -> Result { - let body_view = UntrustedRlp::new(&body); - + pub fn check_response(&self, body: &encoded::Body) -> Result { // check the integrity of the the body against the header - let tx_root = ::util::triehash::ordered_trie_root(body_view.at(0)?.iter().map(|r| r.as_raw().to_vec())); + let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); if tx_root != self.header.transactions_root() { return Err(Error::WrongTrieRoot(self.header.transactions_root(), tx_root)); } - let uncles_hash = body_view.at(1)?.as_raw().sha3(); + let uncles_hash = body.rlp().at(1).as_raw().sha3(); if uncles_hash != self.header.uncles_hash() { return Err(Error::WrongHash(self.header.uncles_hash(), uncles_hash)); } @@ -160,7 +151,7 @@ impl Body { // concatenate the header and the body. let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(body, 2); + stream.append_raw(&body.rlp().as_raw(), 2); Ok(encoded::Block::new(stream.out())) } @@ -194,7 +185,7 @@ pub struct Account { impl Account { /// Check a response with an account against the stored header. - pub fn check_response(&self, proof: &[Bytes]) -> Result { + pub fn check_response(&self, proof: &[Bytes]) -> Result, Error> { let state_root = self.header.state_root(); let mut db = MemoryDB::new(); @@ -203,14 +194,14 @@ impl Account { match TrieDB::new(&db, &state_root).and_then(|t| t.get(&self.address.sha3()))? { Some(val) => { let rlp = UntrustedRlp::new(&val); - Ok(BasicAccount { + Ok(Some(BasicAccount { nonce: rlp.val_at(0)?, balance: rlp.val_at(1)?, storage_root: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, - }) + })) }, - None => Err(Error::BadProof) + None => Ok(None), } } } @@ -219,8 +210,6 @@ impl Account { pub struct Code { /// Block hash, number pair. pub block_id: (H256, u64), - /// Address requested. - pub address: Address, /// Account's code hash. 
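[Editor's sketch] The change to `Account::check_response` above is semantic as well as cosmetic: a proof that verifies against the state root but shows the account to be absent now comes back as `Ok(None)` instead of `Err(BadProof)`. A small std-only stand-in keeping the three outcomes distinct, with a `HashMap` playing the part of the verified state trie and `proof_ok` standing in for the root check:

use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq)]
struct BasicAccount { nonce: u64, balance: u64 }

#[derive(Debug, PartialEq)]
enum Error { BadProof }

// `proof_ok` stands in for rebuilding the trie from the supplied nodes and
// checking it against the header's state root; the map plays the verified trie.
fn check_account(
    proof_ok: bool,
    verified_state: &HashMap<String, BasicAccount>,
    address: &str,
) -> Result<Option<BasicAccount>, Error> {
    if !proof_ok {
        return Err(Error::BadProof); // the nodes prove nothing about this root
    }
    Ok(verified_state.get(address).cloned()) // `None` is a *proven* exclusion
}

fn main() {
    let mut state = HashMap::new();
    state.insert("0xaa".to_string(), BasicAccount { nonce: 1, balance: 10 });

    assert_eq!(check_account(false, &state, "0xaa"), Err(Error::BadProof));
    assert_eq!(check_account(true, &state, "0xbb"), Ok(None)); // absent, but proven
    assert!(matches!(check_account(true, &state, "0xaa"), Ok(Some(_))));
}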
pub code_hash: H256, } @@ -278,11 +267,11 @@ mod tests { #[test] fn no_invalid_header_by_number() { - assert!(HeaderByNumber::new(0, Default::default()).is_none()) + assert!(HeaderProof::new(0, Default::default()).is_none()) } #[test] - fn check_header_by_number() { + fn check_header_proof() { use ::cht; let test_client = TestBlockChainClient::new(); @@ -303,11 +292,9 @@ mod tests { }; let proof = cht.prove(10_000, 0).unwrap().unwrap(); - let req = HeaderByNumber::new(10_000, cht.root()).unwrap(); + let req = HeaderProof::new(10_000, cht.root()).unwrap(); - let raw_header = test_client.block_header(::ethcore::ids::BlockId::Number(10_000)).unwrap(); - - assert!(req.check_response(&raw_header.into_inner(), &proof[..]).is_ok()); + assert!(req.check_response(&proof[..]).is_ok()); } #[test] @@ -316,9 +303,9 @@ mod tests { header.set_number(10_000); header.set_extra_data(b"test_header".to_vec()); let hash = header.hash(); - let raw_header = ::rlp::encode(&header); + let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); - assert!(HeaderByHash(hash).check_response(&*raw_header).is_ok()) + assert!(HeaderByHash(hash).check_response(&raw_header).is_ok()) } #[test] @@ -334,7 +321,8 @@ mod tests { hash: header.hash(), }; - assert!(req.check_response(&*body_stream.drain()).is_ok()) + let response = encoded::Body::new(body_stream.drain().to_vec()); + assert!(req.check_response(&response).is_ok()) } #[test] @@ -412,7 +400,6 @@ mod tests { let code = vec![1u8; 256]; let req = Code { block_id: (Default::default(), 2), - address: Default::default(), code_hash: ::util::Hashable::sha3(&code), }; diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index d9f3937da..e74ab1c70 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,22 +24,15 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{Bytes, DBValue, RwLock, H256}; +use util::{RwLock, H256}; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; use transaction_queue::TransactionQueue; - use request; -/// Defines the operations that a provider for `LES` must fulfill. -/// -/// These are defined at [1], but may be subject to change. -/// Requests which can't be fulfilled should return either an empty RLP list -/// or empty vector where appropriate. -/// -/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) +/// Defines the operations that a provider for the light subprotocol must fulfill. #[cfg_attr(feature = "ipc", ipc(client_ident="LightProviderClient"))] pub trait Provider: Send + Sync { /// Provide current blockchain info. @@ -59,18 +52,18 @@ pub trait Provider: Send + Sync { /// /// The returned vector may have any length in the range [0, `max`], but the /// results within must adhere to the `skip` and `reverse` parameters. 
- fn block_headers(&self, req: request::Headers) -> Vec { + fn block_headers(&self, req: request::CompleteHeadersRequest) -> Option { use request::HashOrNumber; - if req.max == 0 { return Vec::new() } + if req.max == 0 { return None } let best_num = self.chain_info().best_block_number; let start_num = match req.start { HashOrNumber::Number(start_num) => start_num, HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { None => { - trace!(target: "les_provider", "Unknown block hash {} requested", hash); - return Vec::new(); + trace!(target: "pip_provider", "Unknown block hash {} requested", hash); + return None; } Some(header) => { let num = header.number(); @@ -79,7 +72,9 @@ pub trait Provider: Send + Sync { if req.max == 1 || canon_hash != Some(hash) { // Non-canonical header or single header requested. - return vec![header]; + return Some(::request::HeadersResponse { + headers: vec![header], + }) } num @@ -87,116 +82,50 @@ pub trait Provider: Send + Sync { } }; - (0u64..req.max as u64) + let headers: Vec<_> = (0u64..req.max as u64) .map(|x: u64| x.saturating_mul(req.skip + 1)) .take_while(|x| if req.reverse { x < &start_num } else { best_num.saturating_sub(start_num) >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) .map(|x| self.block_header(BlockId::Number(x))) .take_while(|x| x.is_some()) .flat_map(|x| x) - .collect() + .collect(); + + if headers.is_empty() { + None + } else { + Some(::request::HeadersResponse { headers: headers }) + } } /// Get a block header by id. fn block_header(&self, id: BlockId) -> Option; - /// Provide as many as possible of the requested blocks (minus the headers) encoded - /// in RLP format. - fn block_bodies(&self, req: request::Bodies) -> Vec> { - req.block_hashes.into_iter() - .map(|hash| self.block_body(BlockId::Hash(hash))) - .collect() - } + /// Fulfill a block body request. + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; - /// Get a block body by id. - fn block_body(&self, id: BlockId) -> Option; + /// Fulfill a request for block receipts. + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; - /// Provide the receipts as many as possible of the requested blocks. - /// Returns a vector of RLP-encoded lists of receipts. - fn receipts(&self, req: request::Receipts) -> Vec { - req.block_hashes.into_iter() - .map(|hash| self.block_receipts(&hash)) - .map(|receipts| receipts.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) - .collect() - } + /// Get an account proof. + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option; - /// Get a block's receipts as an RLP-encoded list by block hash. - fn block_receipts(&self, hash: &H256) -> Option; + /// Get a storage proof. + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option; - /// Provide a set of merkle proofs, as requested. Each request is a - /// block hash and request parameters. - /// - /// Returns a vector of RLP-encoded lists satisfying the requests. - fn proofs(&self, req: request::StateProofs) -> Vec { - use rlp::RlpStream; - - let mut results = Vec::with_capacity(req.requests.len()); - - for request in req.requests { - let proof = self.state_proof(request); - - let mut stream = RlpStream::new_list(proof.len()); - for node in proof { - stream.append_raw(&node, 1); - } - - results.push(stream.out()); - } - - results - } - - /// Get a state proof from a request. Each proof should be a vector - /// of rlp-encoded trie nodes, in ascending order by distance from the root. 
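[Editor's sketch] The replacement `block_headers` above is essentially a pure function over block numbers: step by `skip + 1`, stop at the chain boundary in the requested direction, and cap at `max`. A std-only rendering of the same iterator chain, returning the numbers that would be looked up:

fn header_numbers(start_num: u64, best_num: u64, skip: u64, max: u64, reverse: bool) -> Vec<u64> {
    (0u64..max)
        .map(|x| x.saturating_mul(skip + 1))
        .take_while(|x| {
            if reverse {
                // strict bound: the walk stops before reaching block 0
                *x < start_num
            } else {
                // never walk past the current best block
                best_num.saturating_sub(start_num) >= *x
            }
        })
        .map(|x| if reverse { start_num - x } else { start_num + x })
        .collect()
}

fn main() {
    // Ascending from 100, every 3rd block (skip = 2), at most 4 results.
    assert_eq!(header_numbers(100, 1_000, 2, 4, false), vec![100, 103, 106, 109]);
    // Descending from 5 with no skip: 5, 4, 3, 2, 1 -- genesis is never reached.
    assert_eq!(header_numbers(5, 1_000, 0, 10, true), vec![5, 4, 3, 2, 1]);
    // Forward requests are truncated at the chain head.
    assert_eq!(header_numbers(998, 1_000, 0, 10, false), vec![998, 999, 1_000]);
}

The real method then looks each number up, truncates at the first missing header, and wraps a non-empty result in `HeadersResponse`, returning `None` rather than an empty list.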
- fn state_proof(&self, req: request::StateProof) -> Vec; - - /// Provide contract code for the specified (block_hash, account_hash) pairs. - /// Each item in the resulting vector is either the raw bytecode or empty. - fn contract_codes(&self, req: request::ContractCodes) -> Vec { - req.code_requests.into_iter() - .map(|req| self.contract_code(req)) - .collect() - } - - /// Get contract code by request. Either the raw bytecode or empty. - fn contract_code(&self, req: request::ContractCode) -> Bytes; - - /// Provide header proofs from the Canonical Hash Tries as well as the headers - /// they correspond to -- each element in the returned vector is a 2-tuple. - /// The first element is a block header and the second a merkle proof of - /// the header in a requested CHT. - fn header_proofs(&self, req: request::HeaderProofs) -> Vec { - use rlp::{self, RlpStream}; - - req.requests.into_iter() - .map(|req| self.header_proof(req)) - .map(|maybe_proof| match maybe_proof { - None => rlp::EMPTY_LIST_RLP.to_vec(), - Some((header, proof)) => { - let mut stream = RlpStream::new_list(2); - stream.append_raw(&header.into_inner(), 1).begin_list(proof.len()); - - for node in proof { - stream.append_raw(&node, 1); - } - - stream.out() - } - }) - .collect() - } + /// Provide contract code for the specified (block_hash, code_hash) pair. + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option; /// Provide a header proof from a given Canonical Hash Trie as well as the - /// corresponding header. The first element is the block header and the - /// second is a merkle proof of the CHT. - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)>; + /// corresponding header. + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; /// Provide pending transactions. fn ready_transactions(&self) -> Vec; /// Provide a proof-of-execution for the given transaction proof request. /// Returns a vector of all state items necessary to execute the transaction. - fn transaction_proof(&self, req: request::TransactionProof) -> Option>; + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option; } // Implementation of a light client data provider for a client. 
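[Editor's sketch] After this rewrite every `Provider` capability has the same shape: a fully-resolved `Complete*Request` in, `Option<*Response>` out, with `None` meaning "cannot serve" where the old trait returned empty RLP lists. A pared-down, hypothetical mirror of that shape, including a light-client-style provider that declines everything it does not store:

#![allow(dead_code)] // sketch only: not every mirrored field is read

struct CompleteBodyRequest { hash: [u8; 32] }
struct BodyResponse { body: Vec<u8> }
struct CompleteReceiptsRequest { hash: [u8; 32] }
struct ReceiptsResponse { receipts: Vec<Vec<u8>> }

trait Provider {
    /// `None` means "cannot serve this request".
    fn block_body(&self, req: CompleteBodyRequest) -> Option<BodyResponse>;
    fn block_receipts(&self, req: CompleteReceiptsRequest) -> Option<ReceiptsResponse>;
}

/// In the spirit of `LightProvider` below: a header-only store answers `None`
/// for everything it does not keep.
struct HeadersOnlyProvider;

impl Provider for HeadersOnlyProvider {
    fn block_body(&self, _req: CompleteBodyRequest) -> Option<BodyResponse> { None }
    fn block_receipts(&self, _req: CompleteReceiptsRequest) -> Option<ReceiptsResponse> { None }
}

fn main() {
    let provider = HeadersOnlyProvider;
    assert!(provider.block_body(CompleteBodyRequest { hash: [0; 32] }).is_none());
    assert!(provider.block_receipts(CompleteReceiptsRequest { hash: [0; 32] }).is_none());
}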
@@ -217,32 +146,52 @@ impl Provider for T { BlockChainClient::block_header(self, id) } - fn block_body(&self, id: BlockId) -> Option { - BlockChainClient::block_body(self, id) + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + BlockChainClient::block_body(self, BlockId::Hash(req.hash)) + .map(|body| ::request::BodyResponse { body: body }) } - fn block_receipts(&self, hash: &H256) -> Option { - BlockChainClient::block_receipts(self, hash) + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + BlockChainClient::block_receipts(self, &req.hash) + .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode_list(&x) }) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(key2) => self.prove_storage(req.key1, key2, req.from_level, BlockId::Hash(req.block)), - None => self.prove_account(req.key1, req.from_level, BlockId::Hash(req.block)), - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + self.prove_account(req.address_hash, BlockId::Hash(req.block_hash)).map(|(proof, acc)| { + ::request::AccountResponse { + proof: proof, + nonce: acc.nonce, + balance: acc.balance, + code_hash: acc.code_hash, + storage_root: acc.storage_root, + } + }) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - self.code_by_hash(req.account_key, BlockId::Hash(req.block_hash)) + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + self.prove_storage(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { + ::request::StorageResponse { + proof: proof, + value: item, + } + }) } - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { - if Some(req.cht_number) != cht::block_to_cht_number(req.block_number) { - debug!(target: "les_provider", "Requested CHT number mismatch with block number."); - return None; - } + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + self.state_data(&req.code_hash) + .map(|code| ::request::CodeResponse { code: code }) + } - let mut needed_hdr = None; + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { + let cht_number = match cht::block_to_cht_number(req.num) { + Some(cht_num) => cht_num, + None => { + debug!(target: "pip_provider", "Requested CHT proof with invalid block number"); + return None; + } + }; + + let mut needed = None; // build the CHT, caching the requested header as we pass through it. let cht = { @@ -258,8 +207,8 @@ impl Provider for T { total_difficulty: td, }; - if hdr.number() == req.block_number { - needed_hdr = Some(hdr); + if hdr.number() == req.num { + needed = Some((hdr, td)); } Some(info) @@ -268,29 +217,33 @@ impl Provider for T { } }; - match cht::build(req.cht_number, block_info) { + match cht::build(cht_number, block_info) { Some(cht) => cht, None => return None, // incomplete CHT. } }; - let needed_hdr = needed_hdr.expect("`needed_hdr` always set in loop, number checked before; qed"); + let (needed_hdr, needed_td) = needed.expect("`needed` always set in loop, number checked before; qed"); // prove our result. 
- match cht.prove(req.block_number, req.from_level) { - Ok(Some(proof)) => Some((needed_hdr, proof)), + match cht.prove(req.num, 0) { + Ok(Some(proof)) => Some(::request::HeaderProofResponse { + proof: proof, + hash: needed_hdr.hash(), + td: needed_td, + }), Ok(None) => None, Err(e) => { - debug!(target: "les_provider", "Error looking up number in freshly-created CHT: {}", e); + debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e); None } } } - fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { use ethcore::transaction::Transaction; - let id = BlockId::Hash(req.at); + let id = BlockId::Hash(req.block_hash); let nonce = match self.nonce(&req.from, id.clone()) { Some(nonce) => nonce, None => return None, @@ -305,6 +258,7 @@ impl Provider for T { }.fake_sign(req.from); self.prove_transaction(transaction, id) + .map(|proof| ::request::ExecutionResponse { items: proof }) } fn ready_transactions(&self) -> Vec { @@ -347,27 +301,31 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } - fn block_body(&self, _id: BlockId) -> Option { + fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { None } - fn block_receipts(&self, _hash: &H256) -> Option { + fn block_receipts(&self, _req: request::CompleteReceiptsRequest) -> Option { None } - fn state_proof(&self, _req: request::StateProof) -> Vec { - Vec::new() - } - - fn contract_code(&self, _req: request::ContractCode) -> Bytes { - Vec::new() - } - - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn account_proof(&self, _req: request::CompleteAccountRequest) -> Option { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn storage_proof(&self, _req: request::CompleteStorageRequest) -> Option { + None + } + + fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option { + None + } + + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { + None + } + + fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { None } @@ -395,10 +353,8 @@ mod tests { let client = TestBlockChainClient::new(); client.add_blocks(2000, EachBlockWith::Nothing); - let req = ::request::HeaderProof { - cht_number: 0, - block_number: 1500, - from_level: 0, + let req = ::request::CompleteHeaderProofRequest { + num: 1500, }; assert!(client.header_proof(req.clone()).is_none()); diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs deleted file mode 100644 index dbff19eb5..000000000 --- a/ethcore/light/src/types/les_request.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! LES request types. 
- -use ethcore::transaction::Action; -use util::{Address, H256, U256, Uint}; - -/// Either a hash or a number. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum HashOrNumber { - /// Block hash variant. - Hash(H256), - /// Block number variant. - Number(u64), -} - -impl From for HashOrNumber { - fn from(hash: H256) -> Self { - HashOrNumber::Hash(hash) - } -} - -impl From for HashOrNumber { - fn from(num: u64) -> Self { - HashOrNumber::Number(num) - } -} - -/// A request for block headers. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Headers { - /// Starting block number or hash. - pub start: HashOrNumber, - /// The maximum amount of headers which can be returned. - pub max: usize, - /// The amount of headers to skip between each response entry. - pub skip: u64, - /// Whether the headers should proceed in falling number from the initial block. - pub reverse: bool, -} - -/// A request for specific block bodies. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Bodies { - /// Hashes which bodies are being requested for. - pub block_hashes: Vec -} - -/// A request for transaction receipts. -/// -/// This request is answered with a list of transaction receipts for each block -/// requested. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Receipts { - /// Block hashes to return receipts for. - pub block_hashes: Vec, -} - -/// A request for a state proof -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProof { - /// Block hash to query state from. - pub block: H256, - /// Key of the state trie -- corresponds to account hash. - pub key1: H256, - /// Key in that account's storage trie; if empty, then the account RLP should be - /// returned. - pub key2: Option, - /// if greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, // could even safely be u8; trie w/ 32-byte key can be at most 64-levels deep. -} - -/// A request for state proofs. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCode { - /// Block hash - pub block_hash: H256, - /// Account key (== sha3(address)) - pub account_key: H256, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCodes { - /// Block hash and account key (== sha3(address)) pairs to fetch code for. - pub code_requests: Vec, -} - -/// A request for a header proof from the Canonical Hash Trie. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProof { - /// Number of the CHT. - pub cht_number: u64, - /// Block number requested. May not be 0: genesis isn't included in any CHT. - pub block_number: u64, - /// If greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, -} - -/// A request for header proofs from the CHT. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for proof of (simulated) transaction execution. 
-#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct TransactionProof { - /// Block hash to request for. - pub at: H256, - /// Address to treat as the caller. - pub from: Address, - /// Action to take: either a call or a create. - pub action: Action, - /// Amount of gas to request proof-of-execution for. - pub gas: U256, - /// Price for each gas. - pub gas_price: U256, - /// Value to simulate sending. - pub value: U256, - /// Transaction data. - pub data: Vec, -} - -/// Kinds of requests. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Kind { - /// Requesting headers. - Headers, - /// Requesting block bodies. - Bodies, - /// Requesting transaction receipts. - Receipts, - /// Requesting proofs of state trie nodes. - StateProofs, - /// Requesting contract code by hash. - Codes, - /// Requesting header proofs (from the CHT). - HeaderProofs, - /// Requesting proof of transaction execution. - TransactionProof, -} - -/// Encompasses all possible types of requests in a single structure. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Request { - /// Requesting headers. - Headers(Headers), - /// Requesting block bodies. - Bodies(Bodies), - /// Requesting transaction receipts. - Receipts(Receipts), - /// Requesting state proofs. - StateProofs(StateProofs), - /// Requesting contract codes. - Codes(ContractCodes), - /// Requesting header proofs. - HeaderProofs(HeaderProofs), - /// Requesting proof of transaction execution. - TransactionProof(TransactionProof), -} - -impl Request { - /// Get the kind of request this is. - pub fn kind(&self) -> Kind { - match *self { - Request::Headers(_) => Kind::Headers, - Request::Bodies(_) => Kind::Bodies, - Request::Receipts(_) => Kind::Receipts, - Request::StateProofs(_) => Kind::StateProofs, - Request::Codes(_) => Kind::Codes, - Request::HeaderProofs(_) => Kind::HeaderProofs, - Request::TransactionProof(_) => Kind::TransactionProof, - } - } - - /// Get the amount of requests being made. - /// In the case of `TransactionProof`, this is the amount of gas being requested. - pub fn amount(&self) -> usize { - match *self { - Request::Headers(ref req) => req.max, - Request::Bodies(ref req) => req.block_hashes.len(), - Request::Receipts(ref req) => req.block_hashes.len(), - Request::StateProofs(ref req) => req.requests.len(), - Request::Codes(ref req) => req.code_requests.len(), - Request::HeaderProofs(ref req) => req.requests.len(), - Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() { - true => usize::max_value(), - false => req.gas.low_u64() as usize, - } - } - } -} diff --git a/ethcore/light/src/types/mod.rs.in b/ethcore/light/src/types/mod.rs.in index 0adfbf0e4..eba551b53 100644 --- a/ethcore/light/src/types/mod.rs.in +++ b/ethcore/light/src/types/mod.rs.in @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -pub mod les_request; \ No newline at end of file +pub mod request; diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs new file mode 100644 index 000000000..77f1389c2 --- /dev/null +++ b/ethcore/light/src/types/request/builder.rs @@ -0,0 +1,190 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Request chain builder utility. +//! Push requests with `push`. Back-references and data required to verify responses must be +//! supplied as well. + +use std::collections::HashMap; +use request::{ + IncompleteRequest, CompleteRequest, Request, + OutputKind, Output, NoSuchOutput, Response, ResponseError, +}; + +/// Build chained requests. Push them onto the series with `push`, +/// and produce a `Requests` object with `build`. Outputs are checked for consistency. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct RequestBuilder { + output_kinds: HashMap<(usize, usize), OutputKind>, + requests: Vec, +} + +impl RequestBuilder { + /// Attempt to push a request onto the request chain. Fails if the request + /// references a non-existent output of a prior request. + pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { + request.check_outputs(|req, idx, kind| { + match self.output_kinds.get(&(req, idx)) { + Some(k) if k == &kind => Ok(()), + _ => Err(NoSuchOutput), + } + })?; + let req_idx = self.requests.len(); + request.note_outputs(|idx, kind| { self.output_kinds.insert((req_idx, idx), kind); }); + self.requests.push(request); + Ok(()) + } + + /// Get a reference to the output kinds map. + pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> { + &self.output_kinds + } + + /// Convert this into a "requests" object. + pub fn build(self) -> Requests { + Requests { + outputs: HashMap::new(), + requests: self.requests, + answered: 0, + } + } +} + +/// Requests pending responses. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Requests { + outputs: HashMap<(usize, usize), Output>, + requests: Vec, + answered: usize, +} + +impl Requests { + /// For each request, produce responses for each. + /// The responses vector produced goes up to the point where the responder + /// first returns `None`, an invalid response, or until all requests have been responded to. + pub fn respond_to_all(mut self, responder: F) -> Vec + where F: Fn(CompleteRequest) -> Option + { + let mut responses = Vec::new(); + + while let Some(response) = self.next_complete().and_then(&responder) { + match self.supply_response(&response) { + Ok(()) => responses.push(response), + Err(e) => { + debug!(target: "pip", "produced bad response to request: {:?}", e); + return responses; + } + } + } + + responses + } + + /// Get access to the underlying slice of requests. + // TODO: unimplemented -> Vec, // do we _have to_ allocate? + pub fn requests(&self) -> &[Request] { &self.requests } + + /// Get the number of answered requests. + pub fn num_answered(&self) -> usize { self.answered } + + /// Get the next request as a filled request. Returns `None` when all requests answered. 
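[Editor's sketch] Putting `RequestBuilder` and `Requests` together: a later request can name an earlier request's output by `(request index, output index)`, and each `supply_response` records the answered outputs before the next `next_complete` call. A usage sketch in the spirit of this module's own tests; it assumes it runs inside this crate (so `request::*` and `util::H256` are in scope) and uses placeholder proof, hash, and difficulty values:

use request::*;
use util::H256;

fn chained_requests_demo() {
    let mut builder = RequestBuilder::default();

    // Output 0 of a header-proof request is the proved header hash...
    builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
        num: 10_000u64.into(),
    })).unwrap();

    // ...so the receipts request can back-reference it instead of naming a hash.
    builder.push(Request::Receipts(IncompleteReceiptsRequest {
        hash: Field::BackReference(0, 0),
    })).unwrap();

    let mut requests = builder.build();
    assert!(requests.next_complete().is_some()); // the header-proof request, ready to serve

    // Answer it; this bumps `num_answered` and records output 0 (the hash)
    // for use by the following request.
    requests.supply_response(&Response::HeaderProof(HeaderProofResponse {
        proof: Vec::new(),        // placeholder proof nodes
        hash: H256::default(),    // placeholder proved hash
        td: 0u64.into(),          // placeholder total difficulty
    })).unwrap();

    assert_eq!(requests.num_answered(), 1);
}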
+ pub fn next_complete(&self) -> Option { + if self.answered == self.requests.len() { + None + } else { + Some(self.requests[self.answered].clone() + .complete() + .expect("All outputs checked as invariant of `Requests` object; qed")) + } + } + + /// Supply a response for the next request. + /// Fails on: wrong request kind, all requests answered already. + pub fn supply_response(&mut self, response: &Response) -> Result<(), ResponseError> { + let idx = self.answered; + + // check validity. + if idx == self.requests.len() { return Err(ResponseError::Unexpected) } + if self.requests[idx].kind() != response.kind() { return Err(ResponseError::WrongKind) } + + let outputs = &mut self.outputs; + response.fill_outputs(|out_idx, output| { + // we don't need to check output kinds here because all back-references + // are validated in the builder. + // TODO: optimization for only storing outputs we "care about"? + outputs.insert((idx, out_idx), output); + }); + + self.answered += 1; + + // fill as much of the next request as we can. + if let Some(ref mut req) = self.requests.get_mut(self.answered) { + req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use request::*; + use super::RequestBuilder; + use util::H256; + + #[test] + fn all_scalar() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: H256::default().into(), + })).unwrap(); + } + + #[test] + #[should_panic] + fn missing_backref() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(100, 3), + })).unwrap(); + } + + #[test] + #[should_panic] + fn wrong_kind() { + let mut builder = RequestBuilder::default(); + assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).is_ok()); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(0, 0), + })).unwrap(); + } + + #[test] + fn good_backreference() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. + })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: Field::BackReference(0, 0), + })).unwrap(); + } +} diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs new file mode 100644 index 000000000..83d7963ac --- /dev/null +++ b/ethcore/light/src/types/request/mod.rs @@ -0,0 +1,1710 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light protocol request types. 
+ +use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; +use util::H256; + +mod builder; + +// re-exports of request types. +pub use self::header::{ + Complete as CompleteHeadersRequest, + Incomplete as IncompleteHeadersRequest, + Response as HeadersResponse +}; +pub use self::header_proof::{ + Complete as CompleteHeaderProofRequest, + Incomplete as IncompleteHeaderProofRequest, + Response as HeaderProofResponse +}; +pub use self::block_body::{ + Complete as CompleteBodyRequest, + Incomplete as IncompleteBodyRequest, + Response as BodyResponse +}; +pub use self::block_receipts::{ + Complete as CompleteReceiptsRequest, + Incomplete as IncompleteReceiptsRequest, + Response as ReceiptsResponse +}; +pub use self::account::{ + Complete as CompleteAccountRequest, + Incomplete as IncompleteAccountRequest, + Response as AccountResponse, +}; +pub use self::storage::{ + Complete as CompleteStorageRequest, + Incomplete as IncompleteStorageRequest, + Response as StorageResponse +}; +pub use self::contract_code::{ + Complete as CompleteCodeRequest, + Incomplete as IncompleteCodeRequest, + Response as CodeResponse, +}; +pub use self::execution::{ + Complete as CompleteExecutionRequest, + Incomplete as IncompleteExecutionRequest, + Response as ExecutionResponse, +}; + +pub use self::builder::{RequestBuilder, Requests}; + +/// Error indicating a reference to a non-existent or wrongly-typed output. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct NoSuchOutput; + +/// Error on processing a response. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ResponseError { + /// Wrong kind of response. + WrongKind, + /// No responses expected. + Unexpected, +} + +/// An input to a request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Field { + /// A pre-specified input. + Scalar(T), + /// An input which can be resolved later on. + /// (Request index, output index) + BackReference(usize, usize), +} + +impl Field { + // attempt conversion into scalar value. + fn into_scalar(self) -> Result { + match self { + Field::Scalar(val) => Ok(val), + _ => Err(NoSuchOutput), + } + } +} + +impl From for Field { + fn from(val: T) -> Self { + Field::Scalar(val) + } +} + +impl Decodable for Field { + fn decode(rlp: &UntrustedRlp) -> Result { + match rlp.val_at::(0)? { + 0 => Ok(Field::Scalar(rlp.val_at::(1)?)), + 1 => Ok({ + let inner_rlp = rlp.at(1)?; + Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) + }), + _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), + } + } +} + +impl Encodable for Field { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + match *self { + Field::Scalar(ref data) => { + s.append(&0u8).append(data); + } + Field::BackReference(ref req, ref idx) => { + s.append(&1u8).begin_list(2).append(req).append(idx); + } + } + } +} + +/// Request outputs which can be reused as inputs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Output { + /// A 32-byte hash output. + Hash(H256), + /// An unsigned-integer output. + Number(u64), +} + +impl Output { + /// Get the output kind. + pub fn kind(&self) -> OutputKind { + match *self { + Output::Hash(_) => OutputKind::Hash, + Output::Number(_) => OutputKind::Number, + } + } +} + +/// Response output kinds which can be used as back-references. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OutputKind { + /// A 32-byte hash output. + Hash, + /// An unsigned-integer output. + Number, +} + +/// Either a hash or a number. 
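[Editor's sketch] `Field` is the small device that makes request chaining work: a value is either supplied up front (`Scalar`) or named as `(request index, output index)` of an earlier request (`BackReference`), and it only becomes usable once `fill` resolves it. A self-contained stand-in showing that life cycle, with a closure acting as the output oracle:

#[derive(Debug, PartialEq)]
enum Field<T> {
    Scalar(T),
    BackReference(usize, usize), // (request index, output index)
}

#[derive(Debug, PartialEq)]
struct NoSuchOutput;

impl<T> Field<T> {
    // A request can only be completed once every field is a scalar.
    fn into_scalar(self) -> Result<T, NoSuchOutput> {
        match self {
            Field::Scalar(val) => Ok(val),
            Field::BackReference(..) => Err(NoSuchOutput),
        }
    }

    // Resolve a back-reference through an oracle over prior outputs.
    fn fill<F>(&mut self, oracle: F) where F: Fn(usize, usize) -> Result<T, NoSuchOutput> {
        if let Field::BackReference(req, idx) = *self {
            if let Ok(value) = oracle(req, idx) {
                *self = Field::Scalar(value);
            }
        }
    }
}

fn main() {
    let mut hash_field: Field<[u8; 32]> = Field::BackReference(0, 0);

    // Pretend request 0 produced a hash at its output 0.
    hash_field.fill(|req, idx| if (req, idx) == (0, 0) { Ok([7u8; 32]) } else { Err(NoSuchOutput) });

    assert_eq!(hash_field.into_scalar(), Ok([7u8; 32]));
}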
+#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", binary)] +pub enum HashOrNumber { + /// Block hash variant. + Hash(H256), + /// Block number variant. + Number(u64), +} + +impl From for HashOrNumber { + fn from(hash: H256) -> Self { + HashOrNumber::Hash(hash) + } +} + +impl From for HashOrNumber { + fn from(num: u64) -> Self { + HashOrNumber::Number(num) + } +} + +impl Decodable for HashOrNumber { + fn decode(rlp: &UntrustedRlp) -> Result { + rlp.as_val::().map(HashOrNumber::Hash) + .or_else(|_| rlp.as_val().map(HashOrNumber::Number)) + } +} + +impl Encodable for HashOrNumber { + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + HashOrNumber::Hash(ref hash) => s.append(hash), + HashOrNumber::Number(ref num) => s.append(num), + }; + } +} + +/// All request types, as they're sent over the network. +/// They may be incomplete, with back-references to outputs +/// of prior requests. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Request { + /// A request for block headers. + Headers(IncompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(IncompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(IncompleteReceiptsRequest), + /// A request for a block body. + Body(IncompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(IncompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(IncompleteStorageRequest), + /// A request for contract code. + Code(IncompleteCodeRequest), + /// A request for proof of execution, + Execution(IncompleteExecutionRequest), +} + +/// All request types, in an answerable state. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CompleteRequest { + /// A request for block headers. + Headers(CompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(CompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(CompleteReceiptsRequest), + /// A request for a block body. + Body(CompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(CompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(CompleteStorageRequest), + /// A request for contract code. + Code(CompleteCodeRequest), + /// A request for proof of execution, + Execution(CompleteExecutionRequest), +} + +impl Request { + fn kind(&self) -> Kind { + match *self { + Request::Headers(_) => Kind::Headers, + Request::HeaderProof(_) => Kind::HeaderProof, + Request::Receipts(_) => Kind::Receipts, + Request::Body(_) => Kind::Body, + Request::Account(_) => Kind::Account, + Request::Storage(_) => Kind::Storage, + Request::Code(_) => Kind::Code, + Request::Execution(_) => Kind::Execution, + } + } +} + +impl Decodable for Request { + fn decode(rlp: &UntrustedRlp) -> Result { + match rlp.val_at::(0)? 
{ + Kind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Request::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Request::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Request { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); + + match *self { + Request::Headers(ref req) => s.append(req), + Request::HeaderProof(ref req) => s.append(req), + Request::Receipts(ref req) => s.append(req), + Request::Body(ref req) => s.append(req), + Request::Account(ref req) => s.append(req), + Request::Storage(ref req) => s.append(req), + Request::Code(ref req) => s.append(req), + Request::Execution(ref req) => s.append(req), + }; + } +} + +impl IncompleteRequest for Request { + type Complete = CompleteRequest; + + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match *self { + Request::Headers(ref req) => req.check_outputs(f), + Request::HeaderProof(ref req) => req.check_outputs(f), + Request::Receipts(ref req) => req.check_outputs(f), + Request::Body(ref req) => req.check_outputs(f), + Request::Account(ref req) => req.check_outputs(f), + Request::Storage(ref req) => req.check_outputs(f), + Request::Code(ref req) => req.check_outputs(f), + Request::Execution(ref req) => req.check_outputs(f), + } + } + + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { + match *self { + Request::Headers(ref req) => req.note_outputs(f), + Request::HeaderProof(ref req) => req.note_outputs(f), + Request::Receipts(ref req) => req.note_outputs(f), + Request::Body(ref req) => req.note_outputs(f), + Request::Account(ref req) => req.note_outputs(f), + Request::Storage(ref req) => req.note_outputs(f), + Request::Code(ref req) => req.note_outputs(f), + Request::Execution(ref req) => req.note_outputs(f), + } + } + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + match *self { + Request::Headers(ref mut req) => req.fill(oracle), + Request::HeaderProof(ref mut req) => req.fill(oracle), + Request::Receipts(ref mut req) => req.fill(oracle), + Request::Body(ref mut req) => req.fill(oracle), + Request::Account(ref mut req) => req.fill(oracle), + Request::Storage(ref mut req) => req.fill(oracle), + Request::Code(ref mut req) => req.fill(oracle), + Request::Execution(ref mut req) => req.fill(oracle), + } + } + + fn complete(self) -> Result { + match self { + Request::Headers(req) => req.complete().map(CompleteRequest::Headers), + Request::HeaderProof(req) => req.complete().map(CompleteRequest::HeaderProof), + Request::Receipts(req) => req.complete().map(CompleteRequest::Receipts), + Request::Body(req) => req.complete().map(CompleteRequest::Body), + Request::Account(req) => req.complete().map(CompleteRequest::Account), + Request::Storage(req) => req.complete().map(CompleteRequest::Storage), + Request::Code(req) => req.complete().map(CompleteRequest::Code), + Request::Execution(req) => req.complete().map(CompleteRequest::Execution), + } + } +} + +/// Kinds of requests. +/// Doubles as the "ID" field of the request. 
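[Editor's sketch] As the comment above says, `Kind` doubles as the on-the-wire request ID; ID 2 is skipped, reserved for the not-yet-implemented `TransactionIndex` request, and unknown IDs are rejected at decode time. A std-only sketch of the same mapping, using `TryFrom<u8>` in place of the RLP decoder:

use std::convert::TryFrom;

#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
    Headers = 0,
    HeaderProof = 1,
    // 2 is reserved for TransactionIndex
    Receipts = 3,
    Body = 4,
    Account = 5,
    Storage = 6,
    Code = 7,
    Execution = 8,
}

#[derive(Debug, PartialEq)]
struct UnknownId(u8);

impl TryFrom<u8> for Kind {
    type Error = UnknownId;

    fn try_from(id: u8) -> Result<Self, Self::Error> {
        Ok(match id {
            0 => Kind::Headers,
            1 => Kind::HeaderProof,
            3 => Kind::Receipts,
            4 => Kind::Body,
            5 => Kind::Account,
            6 => Kind::Storage,
            7 => Kind::Code,
            8 => Kind::Execution,
            other => return Err(UnknownId(other)), // includes the reserved ID 2
        })
    }
}

fn main() {
    assert_eq!(Kind::try_from(4), Ok(Kind::Body));
    assert_eq!(Kind::Execution as u8, 8);
    assert_eq!(Kind::try_from(2), Err(UnknownId(2)));
}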
+#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Kind { + /// A request for headers. + Headers = 0, + /// A request for a header proof. + HeaderProof = 1, + // TransactionIndex = 2, + /// A request for block receipts. + Receipts = 3, + /// A request for a block body. + Body = 4, + /// A request for an account + merkle proof. + Account = 5, + /// A request for contract storage + merkle proof + Storage = 6, + /// A request for contract. + Code = 7, + /// A request for transaction execution + state proof. + Execution = 8, +} + +impl Decodable for Kind { + fn decode(rlp: &UntrustedRlp) -> Result { + match rlp.as_val::()? { + 0 => Ok(Kind::Headers), + 1 => Ok(Kind::HeaderProof), + // 2 => Ok(Kind::TransactionIndex), + 3 => Ok(Kind::Receipts), + 4 => Ok(Kind::Body), + 5 => Ok(Kind::Account), + 6 => Ok(Kind::Storage), + 7 => Ok(Kind::Code), + 8 => Ok(Kind::Execution), + _ => Err(DecoderError::Custom("Unknown PIP request ID.")), + } + } +} + +impl Encodable for Kind { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&(*self as u8)); + } +} + +/// All response types. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Response { + /// A response for block headers. + Headers(HeadersResponse), + /// A response for a header proof (from a CHT) + HeaderProof(HeaderProofResponse), + // TransactionIndex, + /// A response for a block's receipts. + Receipts(ReceiptsResponse), + /// A response for a block body. + Body(BodyResponse), + /// A response for a merkle proof of an account. + Account(AccountResponse), + /// A response for a merkle proof of contract storage. + Storage(StorageResponse), + /// A response for contract code. + Code(CodeResponse), + /// A response for proof of execution, + Execution(ExecutionResponse), +} + +impl Response { + /// Fill reusable outputs by writing them into the function. + pub fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { + match *self { + Response::Headers(ref res) => res.fill_outputs(f), + Response::HeaderProof(ref res) => res.fill_outputs(f), + Response::Receipts(ref res) => res.fill_outputs(f), + Response::Body(ref res) => res.fill_outputs(f), + Response::Account(ref res) => res.fill_outputs(f), + Response::Storage(ref res) => res.fill_outputs(f), + Response::Code(ref res) => res.fill_outputs(f), + Response::Execution(ref res) => res.fill_outputs(f), + } + } + + fn kind(&self) -> Kind { + match *self { + Response::Headers(_) => Kind::Headers, + Response::HeaderProof(_) => Kind::HeaderProof, + Response::Receipts(_) => Kind::Receipts, + Response::Body(_) => Kind::Body, + Response::Account(_) => Kind::Account, + Response::Storage(_) => Kind::Storage, + Response::Code(_) => Kind::Code, + Response::Execution(_) => Kind::Execution, + } + } +} + +impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + match rlp.val_at::(0)? 
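[Editor's sketch] Both `Request` and `Response` travel in the same two-item envelope, `[kind, payload]`: the first item is the `Kind` ID, the second the kind-specific body, so a decoder can read the ID and dispatch before touching the payload. A deliberately simplified stand-in of that framing, using a plain tag byte plus an opaque payload instead of RLP:

#[derive(Debug, PartialEq)]
struct Envelope {
    kind: u8,         // the request/response ID, as in `Kind`
    payload: Vec<u8>, // kind-specific body, left opaque here
}

fn encode(env: &Envelope) -> Vec<u8> {
    // [kind, payload] -- the tag always comes first so decoders can dispatch on it.
    let mut out = Vec::with_capacity(1 + env.payload.len());
    out.push(env.kind);
    out.extend_from_slice(&env.payload);
    out
}

fn decode(bytes: &[u8]) -> Option<Envelope> {
    let (&kind, payload) = bytes.split_first()?;
    Some(Envelope { kind, payload: payload.to_vec() })
}

fn main() {
    let env = Envelope { kind: 3, payload: vec![0xc0] }; // e.g. a Receipts body
    let round_trip = decode(&encode(&env)).unwrap();
    assert_eq!(round_trip, env);
}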
{ + Kind::Headers => Ok(Response::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Response::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Response::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Response::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); + + match *self { + Response::Headers(ref res) => s.append(res), + Response::HeaderProof(ref res) => s.append(res), + Response::Receipts(ref res) => s.append(res), + Response::Body(ref res) => s.append(res), + Response::Account(ref res) => s.append(res), + Response::Storage(ref res) => s.append(res), + Response::Code(ref res) => s.append(res), + Response::Execution(ref res) => s.append(res), + }; + } +} + +/// A potentially incomplete request. +pub trait IncompleteRequest: Sized { + /// The complete variant of this request. + type Complete; + + /// Check prior outputs against the needed inputs. + /// + /// This is called to ensure consistency of this request with + /// others in the same packet. + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>; + + /// Note that this request will produce the following outputs. + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); + + /// Fill fields of the request. + /// + /// This function is provided an "output oracle" which allows fetching of + /// prior request outputs. + /// Only outputs previously checked with `check_outputs` may be available. + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; + + /// Attempt to convert this request into its complete variant. + /// Will succeed if all fields have been filled, will fail otherwise. + fn complete(self) -> Result; +} + +/// Header request. +pub mod header { + use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + + /// Potentially incomplete headers request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Start block. + pub start: Field, + /// Skip between. + pub skip: u64, + /// Maximum to return. + pub max: u64, + /// Whether to reverse from start. + pub reverse: bool, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + start: rlp.val_at(0)?, + skip: rlp.val_at(1)?, + max: rlp.val_at(2)?, + reverse: rlp.val_at(3)? 
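[Editor's sketch] The split between `note_outputs` and `check_outputs` in the trait above is what lets `RequestBuilder::push` reject a bad chain up front: a pushed request first has its back-references checked against the output kinds recorded so far, and only then are its own outputs recorded. A std-only sketch of that bookkeeping, keyed by `(request index, output index)` as in the builder; all names here are stand-ins:

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum OutputKind { Hash, Number }

#[derive(Debug, PartialEq)]
struct NoSuchOutput;

/// One pushed request, reduced to what the builder needs to know:
/// which prior outputs it consumes and which outputs it produces.
struct Pushed {
    needs: Vec<(usize, usize, OutputKind)>, // (request idx, output idx, expected kind)
    produces: Vec<(usize, OutputKind)>,     // (output idx, kind)
}

#[derive(Default)]
struct Builder {
    output_kinds: HashMap<(usize, usize), OutputKind>,
    pushed: usize,
}

impl Builder {
    fn push(&mut self, req: &Pushed) -> Result<(), NoSuchOutput> {
        // check_outputs: every back-reference must name an existing output of the right kind.
        for &(req_idx, out_idx, kind) in &req.needs {
            match self.output_kinds.get(&(req_idx, out_idx)) {
                Some(k) if *k == kind => {}
                _ => return Err(NoSuchOutput),
            }
        }
        // note_outputs: record what this request will produce, keyed by its position.
        let here = self.pushed;
        for &(out_idx, kind) in &req.produces {
            self.output_kinds.insert((here, out_idx), kind);
        }
        self.pushed += 1;
        Ok(())
    }
}

fn main() {
    let mut builder = Builder::default();

    // A header-proof-like request: needs nothing, produces a hash at output 0.
    let header_proof = Pushed { needs: vec![], produces: vec![(0, OutputKind::Hash)] };
    // A receipts-like request: consumes output 0 of request 0 as a hash.
    let receipts = Pushed { needs: vec![(0, 0, OutputKind::Hash)], produces: vec![] };
    // A bad request: expects a number where a hash was produced.
    let bad = Pushed { needs: vec![(0, 0, OutputKind::Number)], produces: vec![] };

    assert_eq!(builder.push(&header_proof), Ok(()));
    assert_eq!(builder.push(&receipts), Ok(()));
    assert_eq!(builder.push(&bad), Err(NoSuchOutput));
}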
+ }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4) + .append(&self.start) + .append(&self.skip) + .append(&self.max) + .append(&self.reverse); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.start { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => + f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.start { + self.start = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + Ok(Output::Number(num)) => Field::Scalar(num.into()), + Err(_) => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + start: self.start.into_scalar()?, + skip: self.skip, + max: self.max, + reverse: self.reverse, + }) + } + } + + /// A complete header request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Start block. + pub start: HashOrNumber, + /// Skip between. + pub skip: u64, + /// Maximum to return. + pub max: u64, + /// Whether to reverse from start. + pub reverse: bool, + } + + /// The output of a request for headers. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The headers requested. + pub headers: Vec, + } + + impl Response { + /// Fill reusable outputs by writing them into the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + use ethcore::header::Header as FullHeader; + + let mut headers = Vec::new(); + + for item in rlp.iter() { + // check that it's a valid encoding. + // TODO: just return full headers here? + let _: FullHeader = item.as_val()?; + headers.push(encoded::Header::new(item.as_raw().to_owned())); + } + + Ok(Response { + headers: headers, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.headers.len()); + for header in &self.headers { + s.append_raw(header.rlp().as_raw(), 1); + } + } + } +} + +/// Request and response for header proofs. +pub mod header_proof { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block number. 
+ pub num: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + num: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.num); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Number), + } + } + + fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { + note(0, OutputKind::Hash); + } + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.num { + self.num = match oracle(req, idx) { + Ok(Output::Number(num)) => Field::Scalar(num.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + num: self.num.into_scalar()?, + }) + } + } + + /// A complete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get a header proof for. + pub num: u64, + } + + /// The output of a request for a header proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion proof of the header and total difficulty in the CHT. + pub proof: Vec, + /// The proved header's hash. + pub hash: H256, + /// The proved header's total difficulty. + pub td: U256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.hash)); + } + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + + Ok(Response { + proof: rlp.list_at(0)?, + hash: rlp.val_at(1)?, + td: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3).begin_list(self.proof.len()); + for item in &self.proof { + s.append_list(&item); + } + + s.append(&self.hash).append(&self.td); + } + } +} + +/// Request and response for block receipts +pub mod block_receipts { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::receipt::Receipt; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::H256; + + /// Potentially incomplete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get receipts for. 
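[Editor's sketch] The header-proof pair above is the one request in this set that actually publishes an output: `note_outputs` declares slot 0 as a hash, and the response's `fill_outputs` later writes the proved hash into that slot, which is exactly what a following receipts or body request back-references. A tiny, self-contained stand-in of that callback shape (the types here mirror, but are not, the crate's own):

use std::collections::HashMap;

#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
enum Output {
    Hash([u8; 32]),
    Number(u64),
}

/// Stand-in for `HeaderProofResponse`: the proved hash is its only reusable output.
struct HeaderProofResponse {
    hash: [u8; 32],
}

impl HeaderProofResponse {
    fn fill_outputs<F>(&self, mut f: F) where F: FnMut(usize, Output) {
        f(0, Output::Hash(self.hash)); // output 0: the proved header hash
    }
}

fn main() {
    let response = HeaderProofResponse { hash: [9u8; 32] };
    let request_index = 0;

    // The `Requests` object keeps answered outputs keyed by (request, output) index.
    let mut outputs: HashMap<(usize, usize), Output> = HashMap::new();
    response.fill_outputs(|out_idx, output| {
        outputs.insert((request_index, out_idx), output);
    });

    assert_eq!(outputs.get(&(0, 0)), Some(&Output::Hash([9u8; 32])));
}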
+ pub hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Number(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } + } + + /// A complete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get block receipts for. + pub hash: H256, + } + + /// The output of a request for block receipts. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block receipts. + pub receipts: Vec + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + + Ok(Response { + receipts: rlp.as_list()?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append_list(&self.receipts); + } + } +} + +/// Request and response for a block body +pub mod block_body { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::H256; + + /// Potentially incomplete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get receipts for. + pub hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } + } + + /// A complete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The hash to get a block body for. + pub hash: H256, + } + + /// The output of a request for block body. 
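+	/// (The body RLP is checked to contain well-formed transactions and uncle headers when decoded.)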
+ #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block body. + pub body: encoded::Body, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + use ethcore::header::Header as FullHeader; + use ethcore::transaction::UnverifiedTransaction; + + // check body validity. + let _: Vec = rlp.list_at(0)?; + let _: Vec = rlp.list_at(1)?; + + Ok(Response { + body: encoded::Body::new(rlp.as_raw().to_owned()), + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append_raw(&self.body.rlp().as_raw(), 1); + } + } +} + +/// A request for an account proof. +pub mod account { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete request for an account proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.address_hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + f(1, OutputKind::Hash); + } + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), + } + } + + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + }) + } + } + + /// A complete request for an account. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Account nonce. + pub nonce: U256, + /// Account balance. + pub balance: U256, + /// Account's code hash. + pub code_hash: H256, + /// Account's storage trie root. + pub storage_root: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
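+	/// (Output 0 is the account's code hash; output 1 is its storage root.)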
+ pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.code_hash)); + f(1, Output::Hash(self.storage_root)); + } + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Response { + proof: rlp.list_at(0)?, + nonce: rlp.val_at(1)?, + balance: rlp.val_at(2)?, + code_hash: rlp.val_at(3)?, + storage_root: rlp.val_at(4)? + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5).begin_list(self.proof.len()); + for item in &self.proof { + s.append_list(&item); + } + + s.append(&self.nonce) + .append(&self.balance) + .append(&self.code_hash) + .append(&self.storage_root); + } + } +} + +/// A request for a storage proof. +pub mod storage { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::{Bytes, H256}; + + /// Potentially incomplete request for an storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + /// Hash of the storage key. + pub key_hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + key_hash: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.block_hash) + .append(&self.address_hash) + .append(&self.key_hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.key_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + } + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), + } + } + + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), + } + } + + if let Field::BackReference(req, idx) = self.key_hash { + self.key_hash = match oracle(req, idx) { + Ok(Output::Hash(key_hash)) => Field::Scalar(key_hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + key_hash: self.key_hash.into_scalar()?, + }) + } + } + + /// A complete request for a storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + /// Storage key hash. 
+ pub key_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Storage value. + pub value: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.value)); + } + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Response { + proof: rlp.list_at(0)?, + value: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).begin_list(self.proof.len()); + for item in &self.proof { + s.append_list(&item); + } + s.append(&self.value); + } + } +} + +/// A request for contract code. +pub mod contract_code { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::{Bytes, H256}; + + /// Potentially incomplete contract code request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The code hash. + pub code_hash: Field, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + code_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.code_hash); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + if let Field::BackReference(req, idx) = self.code_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), + } + } + + if let Field::BackReference(req, idx) = self.code_hash { + self.code_hash = match oracle(req, idx) { + Ok(Output::Hash(code_hash)) => Field::Scalar(code_hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + code_hash: self.code_hash.into_scalar()?, + }) + } + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The code hash. + pub code_hash: H256, + } + + /// The output of a request for + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The requested code. + pub code: Bytes, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
+ pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + + Ok(Response { + code: rlp.as_val()?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.code); + } + } +} + +/// A request for proof of execution. +pub mod execution { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::transaction::Action; + use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + use util::{Bytes, Address, U256, H256, DBValue}; + + /// Potentially incomplete execution proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } + + impl Decodable for Incomplete { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + from: rlp.val_at(1)?, + action: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + gas_price: rlp.val_at(4)?, + value: rlp.val_at(5)?, + data: rlp.val_at(6)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(7) + .append(&self.block_hash) + .append(&self.from); + + match self.action { + Action::Create => s.append_empty_data(), + Action::Call(ref addr) => s.append(addr), + }; + + s.append(&self.gas) + .append(&self.gas_price) + .append(&self.value) + .append(&self.data); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), + } + } + } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + from: self.from, + action: self.action, + gas: self.gas, + gas_price: self.gas_price, + value: self.value, + data: self.data, + }) + } + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } + + /// The output of a request for proof of execution + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// All state items (trie nodes, code) necessary to re-prove the transaction. + pub items: Vec, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
+ pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(rlp: &UntrustedRlp) -> Result { + let mut items = Vec::new(); + for raw_item in rlp.iter() { + let mut item = DBValue::new(); + item.append_slice(raw_item.data()?); + items.push(item); + } + + Ok(Response { + items: items, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.items.len()); + + for item in &self.items { + s.append(&&**item); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ethcore::header::Header; + + fn check_roundtrip(val: T) + where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug + { + let bytes = ::rlp::encode(&val); + let new_val: T = ::rlp::decode(&bytes); + assert_eq!(val, new_val); + } + + #[test] + fn hash_or_number_roundtrip() { + let hash = HashOrNumber::Hash(H256::default()); + let number = HashOrNumber::Number(5); + + check_roundtrip(hash); + check_roundtrip(number); + } + + #[test] + fn field_roundtrip() { + let field_scalar = Field::Scalar(5usize); + let field_back: Field = Field::BackReference(1, 2); + + check_roundtrip(field_scalar); + check_roundtrip(field_back); + } + + #[test] + fn headers_roundtrip() { + let req = IncompleteHeadersRequest { + start: Field::Scalar(5u64.into()), + skip: 0, + max: 100, + reverse: false, + }; + + let full_req = Request::Headers(req.clone()); + let res = HeadersResponse { + headers: vec![ + ::ethcore::encoded::Header::new(::rlp::encode(&Header::default()).to_vec()) + ] + }; + let full_res = Response::Headers(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn header_proof_roundtrip() { + let req = IncompleteHeaderProofRequest { + num: Field::BackReference(1, 234), + }; + + let full_req = Request::HeaderProof(req.clone()); + let res = HeaderProofResponse { + proof: Vec::new(), + hash: Default::default(), + td: 100.into(), + }; + let full_res = Response::HeaderProof(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn receipts_roundtrip() { + let req = IncompleteReceiptsRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Receipts(req.clone()); + let res = ReceiptsResponse { + receipts: vec![Default::default(), Default::default()], + }; + let full_res = Response::Receipts(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn body_roundtrip() { + let req = IncompleteBodyRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Body(req.clone()); + let res = BodyResponse { + body: { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + ::ethcore::encoded::Body::new(stream.out()) + }, + }; + let full_res = Response::Body(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn account_roundtrip() { + let req = IncompleteAccountRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + }; + + let full_req = Request::Account(req.clone()); + let res = AccountResponse { + proof: Vec::new(), + nonce: 100.into(), + balance: 123456.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }; + let full_res = 
Response::Account(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn storage_roundtrip() { + let req = IncompleteStorageRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + key_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Storage(req.clone()); + let res = StorageResponse { + proof: Vec::new(), + value: H256::default(), + }; + let full_res = Response::Storage(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn code_roundtrip() { + let req = IncompleteCodeRequest { + block_hash: Field::Scalar(Default::default()), + code_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Code(req.clone()); + let res = CodeResponse { + code: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Code(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn execution_roundtrip() { + use util::DBValue; + + let req = IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }; + + let full_req = Request::Execution(req.clone()); + let res = ExecutionResponse { + items: vec![DBValue::new(), { + let mut value = DBValue::new(); + value.append_slice(&[1, 1, 1, 2, 3]); + value + }], + }; + let full_res = Response::Execution(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn vec_test() { + use rlp::*; + + let reqs: Vec<_> = (0..10).map(|_| IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }).map(Request::Execution).collect(); + + let mut stream = RlpStream::new_list(2); + stream.append(&100usize).append_list(&reqs); + let out = stream.out(); + + let rlp = UntrustedRlp::new(&out); + assert_eq!(rlp.val_at::(0).unwrap(), 100usize); + assert_eq!(rlp.list_at::(1).unwrap(), reqs); + } +} diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index d284954e7..978f7a99c 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1607,23 +1607,14 @@ impl MayPanic for Client { } impl ::client::ProvingBlockChainClient for Client { - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)> { self.state_at(id) - .and_then(move |state| state.prove_storage(key1, key2, from_level).ok()) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_storage(key1, key2).ok()) } - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, ::types::basic_account::BasicAccount)> { self.state_at(id) - .and_then(move |state| state.prove_account(key1, from_level).ok()) - .unwrap_or_else(Vec::new) - } - - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes { - self.state_at(id) - .and_then(move |state| 
state.code_by_address_hash(account_key).ok()) - .and_then(|x| x) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_account(key1).ok()) } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { @@ -1643,7 +1634,6 @@ impl ::client::ProvingBlockChainClient for Client { _ => return Some(state.drop().1.extract_proof()), } } - } impl Drop for Client { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index d96b305de..16f38203f 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -38,6 +38,7 @@ use error::{ImportResult, Error as EthcoreError}; use evm::{Factory as EvmFactory, VMType, Schedule}; use miner::{Miner, MinerService, TransactionImportResult}; use spec::Spec; +use types::basic_account::BasicAccount; use types::mode::Mode; use types::pruning_info::PruningInfo; @@ -758,16 +759,12 @@ impl BlockChainClient for TestBlockChainClient { } impl ProvingBlockChainClient for TestBlockChainClient { - fn prove_storage(&self, _: H256, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() + fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { + None } - fn prove_account(&self, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() - } - - fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { - Vec::new() + fn prove_account(&self, _: H256, _: BlockId) -> Option<(Vec, BasicAccount)> { + None } fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option> { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index b4c284f11..a612d8a77 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -34,6 +34,7 @@ use env_info::LastHashes; use block_import_error::BlockImportError; use ipc::IpcConfig; use types::ids::*; +use types::basic_account::BasicAccount; use types::trace_filter::Filter as TraceFilter; use types::call_analytics::CallAnalytics; use types::blockchain_info::BlockChainInfo; @@ -315,19 +316,12 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// /// Both provided keys assume a secure trie. /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec; + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)>; /// Prove account existence at a specific block id. /// The key is the keccak hash of the account's address. /// Returns a vector of raw trie nodes (in order from the root) proving the query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec; - - /// Get code by address hash. - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, BasicAccount)>; /// Prove execution of a transaction at the given block. fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option>; diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 9e762979b..d8aad430a 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -438,18 +438,19 @@ impl Account { /// trie. /// `storage_key` is the hash of the desired storage key, meaning /// this will only work correctly under a secure trie. 
- /// Returns a merkle proof of the storage trie node with all nodes before `from_level` - /// omitted. - pub fn prove_storage(&self, db: &HashDB, storage_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> Result<(Vec, H256), Box> { use util::trie::{Trie, TrieDB}; use util::trie::recorder::Recorder; - let mut recorder = Recorder::with_depth(from_level); + let mut recorder = Recorder::new(); let trie = TrieDB::new(db, &self.storage_root)?; - let _ = trie.get_with(&storage_key, &mut recorder)?; + let item: U256 = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&storage_key, query)?.unwrap_or_else(U256::zero) + }; - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), item.into())) } } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 6e2a956ab..26d33d152 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; +use types::basic_account::BasicAccount; use types::executed::{Executed, ExecutionError}; use types::state_diff::StateDiff; use transaction::SignedTransaction; @@ -857,47 +858,43 @@ impl State { // State proof implementations; useful for light client protocols. impl State { /// Prove an account's existence or nonexistence in the state trie. - /// Returns a merkle proof of the account's trie node with all nodes before `from_level` - /// omitted or an encountered trie error. + /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. + /// If the account doesn't exist in the trie, prove that and return defaults. /// Requires a secure trie to be used for accurate results. /// `account_key` == sha3(address) - pub fn prove_account(&self, account_key: H256, from_level: u32) -> trie::Result> { - let mut recorder = Recorder::with_depth(from_level); + pub fn prove_account(&self, account_key: H256) -> trie::Result<(Vec, BasicAccount)> { + let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - trie.get_with(&account_key, &mut recorder)?; + let maybe_account: Option = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&account_key, query)? + }; + let account = maybe_account.unwrap_or_else(|| BasicAccount { + balance: 0.into(), + nonce: self.account_start_nonce, + code_hash: SHA3_EMPTY, + storage_root: ::util::sha3::SHA3_NULL_RLP, + }); - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), account)) } /// Prove an account's storage key's existence or nonexistence in the state. - /// Returns a merkle proof of the account's storage trie with all nodes before - /// `from_level` omitted. Requires a secure trie to be used for correctness. + /// Returns a merkle proof of the account's storage trie. + /// Requires a secure trie to be used for correctness. /// `account_key` == sha3(address) /// `storage_key` == sha3(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> trie::Result> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> trie::Result<(Vec, H256)> { // TODO: probably could look into cache somehow but it's keyed by // address, not sha3(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let acc = match trie.get_with(&account_key, Account::from_rlp)? 
{ Some(acc) => acc, - None => return Ok(Vec::new()), + None => return Ok((Vec::new(), H256::new())), }; let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - acc.prove_storage(account_db.as_hashdb(), storage_key, from_level) - } - - /// Get code by address hash. - /// Only works when backed by a secure trie. - pub fn code_by_address_hash(&self, account_key: H256) -> trie::Result> { - let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - let mut acc = match trie.get_with(&account_key, Account::from_rlp)? { - Some(acc) => acc, - None => return Ok(None), - }; - - let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - Ok(acc.cache_code(account_db.as_hashdb()).map(|c| (&*c).clone())) + acc.prove_storage(account_db.as_hashdb(), storage_key) } } diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 8ff08b965..8a99a7239 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -268,7 +268,10 @@ impl LightDispatcher { })); match nonce_future { - Some(x) => x.map(|acc| acc.nonce).map_err(|_| errors::no_light_peers()).boxed(), + Some(x) => + x.map(|acc| acc.map_or_else(Default::default, |acc| acc.nonce)) + .map_err(|_| errors::no_light_peers()) + .boxed(), None => future::err(errors::network_disabled()).boxed() } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index c6f0d709d..251daf90d 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -105,15 +105,22 @@ impl EthClient { match cht_root { None => return future::ok(None).boxed(), Some(root) => { - let req = request::HeaderByNumber::new(n, root) + let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); - self.sync.with_context(|ctx| - self.on_demand.header_by_number(ctx, req) - .map(Some) - .map_err(err_premature_cancel) - .boxed() - ) + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + self.sync.with_context(|ctx| { + let fut = self.on_demand.hash_by_number(ctx, req) + .map(request::HeaderByHash) + .map_err(err_premature_cancel); + + fut.and_then(move |req| { + match sync.with_context(|ctx| on_demand.header_by_hash(ctx, req)) { + Some(fut) => fut.map_err(err_premature_cancel).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).map(Some).boxed() + }) } } } @@ -149,7 +156,7 @@ impl EthClient { sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, - }).map(Some)) + })) .map(|x| x.map_err(err_premature_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) }).boxed() diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index fba89dd7b..4590103e7 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -16,7 +16,7 @@ //! Light client synchronization. //! -//! This will synchronize the header chain using LES messages. +//! This will synchronize the header chain using PIP messages. //! Dataflow is largely one-directional as headers are pushed into //! the light client queue for import. Where possible, they are batched //! in groups. 
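For orientation, a headers request is now packaged through the PIP request builder rather than sent as a bare LES message. A minimal sketch of what that looks like, assuming the `light::request` items used by the dispatcher further down in this patch (the block number and batch size are illustrative):

	use light::request::{self, Field, IncompleteHeadersRequest, Request};

	// Ask for up to 64 headers walking backwards from block 1_000. `start` may
	// also be a back-reference to an earlier request's output, which is why the
	// "incomplete" form is what gets pushed into the builder.
	let mut builder = request::RequestBuilder::default();
	builder.push(Request::Headers(IncompleteHeadersRequest {
		start: Field::Scalar(1_000u64.into()),
		skip: 0,
		max: 64,
		reverse: true,
	})).expect("request is fully specified with no unresolved back-references; qed");
	let packaged = builder.build();
	// `packaged` is what the dispatcher below hands to the context's `request_from`.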
@@ -36,14 +36,15 @@ use std::collections::HashMap; use std::mem; use std::sync::Arc; +use ethcore::encoded; use light::client::{AsLightClient, LightChainClient}; use light::net::{ Announcement, Handler, BasicContext, EventContext, - Capabilities, ReqId, Status, + Capabilities, ReqId, Status, Error as NetError, }; -use light::request; +use light::request::{self, CompleteHeadersRequest as HeadersRequest}; use network::PeerId; -use util::{Bytes, U256, H256, Mutex, RwLock}; +use util::{U256, H256, Mutex, RwLock}; use rand::{Rng, OsRng}; use self::sync_round::{AbortReason, SyncRound, ResponseContext}; @@ -91,7 +92,7 @@ impl Peer { #[derive(Debug)] enum AncestorSearch { Queued(u64), // queued to search for blocks starting from here. - Awaiting(ReqId, u64, request::Headers), // awaiting response for this request. + Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request. Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. FoundCommon(u64, H256), // common block found. Genesis, // common ancestor is the genesis. @@ -113,7 +114,7 @@ impl AncestorSearch { match self { AncestorSearch::Awaiting(id, start, req) => { if &id == ctx.req_id() { - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { for header in &headers { if client.is_known(&header.hash()) { @@ -150,17 +151,17 @@ impl AncestorSearch { } fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch - where F: FnMut(request::Headers) -> Option + where F: FnMut(HeadersRequest) -> Option { - const BATCH_SIZE: usize = 64; + const BATCH_SIZE: u64 = 64; match self { AncestorSearch::Queued(start) => { - let batch_size = ::std::cmp::min(start as usize, BATCH_SIZE); + let batch_size = ::std::cmp::min(start, BATCH_SIZE); trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor", batch_size, start); - let req = request::Headers { + let req = HeadersRequest { start: start.into(), max: batch_size, skip: 0, @@ -193,13 +194,13 @@ struct ResponseCtx<'a> { peer: PeerId, req_id: ReqId, ctx: &'a BasicContext, - data: &'a [Bytes], + data: &'a [encoded::Header], } impl<'a> ResponseContext for ResponseCtx<'a> { fn responder(&self) -> PeerId { self.peer } fn req_id(&self) -> &ReqId { &self.req_id } - fn data(&self) -> &[Bytes] { self.data } + fn data(&self) -> &[encoded::Header] { self.data } fn punish_responder(&self) { self.ctx.disable_peer(self.peer) } } @@ -313,11 +314,22 @@ impl Handler for LightSync { self.maintain_sync(ctx.as_basic()); } - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - if !self.peers.read().contains_key(&ctx.peer()) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) { + let peer = ctx.peer(); + if !self.peers.read().contains_key(&peer) { return } + let headers = match responses.get(0) { + Some(&request::Response::Headers(ref response)) => &response.headers[..], + Some(_) => { + trace!("Disabling peer {} for wrong response type.", peer); + ctx.disable_peer(peer); + &[] + } + None => &[], + }; + { let mut state = self.state.lock(); @@ -465,18 +477,27 @@ impl LightSync { // naive request dispatcher: just give to any peer which says it will // give us responses. 
- let dispatcher = move |req: request::Headers| { + let dispatcher = move |req: HeadersRequest| { rng.shuffle(&mut peer_ids); + let request = { + let mut builder = request::RequestBuilder::default(); + builder.push(request::Request::Headers(request::IncompleteHeadersRequest { + start: req.start.into(), + skip: req.skip, + max: req.max, + reverse: req.reverse, + })).expect("request provided fully complete with no unresolved back-references; qed"); + builder.build() + }; for peer in &peer_ids { - if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { - match ctx.request_from(*peer, request::Request::Headers(req.clone())) { - Ok(id) => { - return Some(id) - } - Err(e) => - trace!(target: "sync", "Error requesting headers from viable peer: {}", e), + match ctx.request_from(*peer, request.clone()) { + Ok(id) => { + return Some(id) } + Err(NetError::NoCredits) => {} + Err(e) => + trace!(target: "sync", "Error requesting headers from viable peer: {}", e), } } diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index 0629da956..d85d2548d 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -18,10 +18,11 @@ use std::fmt; +use ethcore::encoded; use ethcore::header::Header; -use light::request::{HashOrNumber, Headers as HeadersRequest}; -use rlp::{DecoderError, UntrustedRlp}; -use util::{Bytes, H256}; +use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest}; +use rlp::DecoderError; +use util::H256; /// Errors found when decoding headers and verifying with basic constraints. #[derive(Debug, PartialEq)] @@ -71,13 +72,13 @@ pub trait Constraint { fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; } -/// Decode a response and do basic verification against a request. -pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result, BasicError> { - let headers: Vec<_> = try!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); +/// Do basic verification of provided headers against a request. 
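+/// Checks the header count against the requested `max`, the first header against
+/// `start`, and the spacing of consecutive headers against `skip`.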
+pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result, BasicError> { + let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect(); let reverse = request.reverse; - try!(Max(request.max).verify(&headers, reverse)); + try!(Max(request.max as usize).verify(&headers, reverse)); match request.start { HashOrNumber::Number(ref num) => try!(StartsAtNumber(*num).verify(&headers, reverse)), HashOrNumber::Hash(ref hash) => try!(StartsAtHash(*hash).verify(&headers, reverse)), @@ -150,8 +151,9 @@ impl Constraint for Max { #[cfg(test)] mod tests { + use ethcore::encoded; use ethcore::header::Header; - use light::request::Headers as HeadersRequest; + use light::request::CompleteHeadersRequest as HeadersRequest; use super::*; @@ -175,10 +177,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -201,10 +203,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -227,10 +229,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); + assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); } #[test] @@ -246,9 +248,9 @@ mod tests { let mut header = Header::default(); header.set_number(x); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); + assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); } } diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 6fa635214..dfa17aad4 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -20,13 +20,14 @@ use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt; +use ethcore::encoded; use ethcore::header::Header; use light::net::ReqId; -use light::request::Headers as HeadersRequest; +use light::request::CompleteHeadersRequest as HeadersRequest; use network::PeerId; -use util::{Bytes, H256}; +use util::H256; use super::response; @@ -40,7 +41,7 @@ pub trait ResponseContext { /// Get the request ID this response corresponds to. fn req_id(&self) -> &ReqId; /// Get the (unverified) response data. - fn data(&self) -> &[Bytes]; + fn data(&self) -> &[encoded::Header]; /// Punish the responder. 
fn punish_responder(&self); } @@ -114,7 +115,7 @@ impl Fetcher { let needed_headers = HeadersRequest { start: high_rung.parent_hash().clone().into(), - max: diff as usize - 1, + max: diff - 1, skip: 0, reverse: true, }; @@ -190,7 +191,7 @@ impl Fetcher { return SyncRound::Fetch(self); } - match response::decode_and_verify(headers, &request.headers_request) { + match response::verify(headers, &request.headers_request) { Err(e) => { trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); ctx.punish_responder(); @@ -286,21 +287,21 @@ impl Fetcher { } // Compute scaffold parameters from non-zero distance between start and target block: (skip, pivots). -fn scaffold_params(diff: u64) -> (u64, usize) { +fn scaffold_params(diff: u64) -> (u64, u64) { // default parameters. // amount of blocks between each scaffold pivot. const ROUND_SKIP: u64 = 255; // amount of scaffold pivots: these are the Xs in "X___X___X" - const ROUND_PIVOTS: usize = 256; + const ROUND_PIVOTS: u64 = 256; let rem = diff % (ROUND_SKIP + 1); if diff <= ROUND_SKIP { // just request headers from the start to the target. - (0, rem as usize) + (0, rem) } else { // the number of pivots necessary to exactly hit or overshoot the target. let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 }; - let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS as u64) as usize; + let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS); (ROUND_SKIP, num_pivots) } } @@ -319,7 +320,7 @@ pub struct RoundStart { contributors: HashSet, attempt: usize, skip: u64, - pivots: usize, + pivots: u64, } impl RoundStart { @@ -372,7 +373,7 @@ impl RoundStart { } }; - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { if self.sparse_headers.len() == 0 && headers.get(0).map_or(false, |x| x.parent_hash() != &self.start_block.1) { @@ -383,7 +384,7 @@ impl RoundStart { self.contributors.insert(ctx.responder()); self.sparse_headers.extend(headers); - if self.sparse_headers.len() == self.pivots { + if self.sparse_headers.len() as u64 == self.pivots { return if self.skip == 0 { SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into()) } else { @@ -429,7 +430,7 @@ impl RoundStart { let start = (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (self.skip + 1); - let max = self.pivots - self.sparse_headers.len(); + let max = self.pivots - self.sparse_headers.len() as u64; let headers_request = HeadersRequest { start: start.into(), diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index d0e472374..898f8766d 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -28,6 +28,7 @@ use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::net::request_credits::FlowParams; +use light::provider::LightProvider; use network::{NodeId, PeerId}; use util::RwLock; @@ -71,7 +72,7 @@ enum PeerData { } // test peer type. -// Either a full peer or a LES peer. +// Either a full peer or a light peer. 
pub struct Peer { proto: LightProtocol, queue: RwLock>, @@ -115,7 +116,8 @@ impl Peer { }, }; - let mut proto = LightProtocol::new(chain.clone(), params); + let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); + let mut proto = LightProtocol::new(Arc::new(provider), params); proto.add_handler(sync.clone()); Peer { proto: proto, From 9fdd0e3a0a279b8b8574192c80374c2dcdb09619 Mon Sep 17 00:00:00 2001 From: keorn Date: Thu, 23 Mar 2017 12:19:28 +0000 Subject: [PATCH 09/11] Switching ValidatorSet (#4961) * add multi validator set * nicer comment * validate in constructor * reporting --- ethcore/res/validator_multi.json | 42 ++++++ ethcore/src/engines/authority_round.rs | 2 +- ethcore/src/engines/basic_authority.rs | 2 +- ethcore/src/engines/tendermint/mod.rs | 2 +- ethcore/src/engines/validator_set/mod.rs | 15 +- ethcore/src/engines/validator_set/multi.rs | 158 +++++++++++++++++++++ ethcore/src/spec/spec.rs | 4 + json/src/spec/validator_set.rs | 17 ++- 8 files changed, 231 insertions(+), 11 deletions(-) create mode 100644 ethcore/res/validator_multi.json create mode 100644 ethcore/src/engines/validator_set/multi.rs diff --git a/ethcore/res/validator_multi.json b/ethcore/res/validator_multi.json new file mode 100644 index 000000000..5d51b73da --- /dev/null +++ b/ethcore/res/validator_multi.json @@ -0,0 +1,42 @@ +{ + "name": "TestMutiValidator", + "engine": { + "basicAuthority": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "durationLimit": "0x0d", + "validators": { + "multi": { + "0": { "list": ["0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1"] }, + "2": { "list": ["0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e"] } + } + } + } + } + }, + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x69" + }, + "genesis": { + "seal": { + "generic": "0xc180" + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x2fefd8" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "99999999999999999999999" }, + "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "99999999999999999999999" } + } +} diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 2a18c748d..4f823fa8d 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -82,7 +82,7 @@ pub struct AuthorityRound { proposed: AtomicBool, client: RwLock>>, signer: EngineSigner, - validators: Box, + validators: Box, /// Is this Engine just for testing (prevents step calibration). 
calibrate_step: bool, } diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index d8a1df947..e5a53d4e9 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -58,7 +58,7 @@ pub struct BasicAuthority { gas_limit_bound_divisor: U256, builtins: BTreeMap, signer: EngineSigner, - validators: Box, + validators: Box, } impl BasicAuthority { diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 46e67a2a8..464e102de 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -98,7 +98,7 @@ pub struct Tendermint { /// Hash of the proposal parent block. proposal_parent: RwLock, /// Set used to determine the current validators. - validators: Box, + validators: Box, } impl Tendermint { diff --git a/ethcore/src/engines/validator_set/mod.rs b/ethcore/src/engines/validator_set/mod.rs index 3e86c357f..cbbedfb33 100644 --- a/ethcore/src/engines/validator_set/mod.rs +++ b/ethcore/src/engines/validator_set/mod.rs @@ -19,6 +19,7 @@ mod simple_list; mod safe_contract; mod contract; +mod multi; use std::sync::Weak; use util::{Address, H256}; @@ -27,23 +28,27 @@ use client::Client; use self::simple_list::SimpleList; use self::contract::ValidatorContract; use self::safe_contract::ValidatorSafeContract; +use self::multi::Multi; /// Creates a validator set from spec. -pub fn new_validator_set(spec: ValidatorSpec) -> Box { +pub fn new_validator_set(spec: ValidatorSpec) -> Box { match spec { ValidatorSpec::List(list) => Box::new(SimpleList::new(list.into_iter().map(Into::into).collect())), ValidatorSpec::SafeContract(address) => Box::new(ValidatorSafeContract::new(address.into())), ValidatorSpec::Contract(address) => Box::new(ValidatorContract::new(address.into())), + ValidatorSpec::Multi(sequence) => Box::new( + Multi::new(sequence.into_iter().map(|(block, set)| (block.into(), new_validator_set(set))).collect()) + ), } } -pub trait ValidatorSet { +pub trait ValidatorSet: Send + Sync { /// Checks if a given address is a validator. - fn contains(&self, bh: &H256, address: &Address) -> bool; + fn contains(&self, parent_block_hash: &H256, address: &Address) -> bool; /// Draws an validator nonce modulo number of validators. - fn get(&self, bh: &H256, nonce: usize) -> Address; + fn get(&self, parent_block_hash: &H256, nonce: usize) -> Address; /// Returns the current number of validators. - fn count(&self, bh: &H256) -> usize; + fn count(&self, parent_block_hash: &H256) -> usize; /// Notifies about malicious behaviour. fn report_malicious(&self, _validator: &Address) {} /// Notifies about benign misbehaviour. diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs new file mode 100644 index 000000000..5027f23cd --- /dev/null +++ b/ethcore/src/engines/validator_set/multi.rs @@ -0,0 +1,158 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +/// Validator set changing at fork blocks. + +use std::collections::BTreeMap; +use std::sync::Weak; +use util::{H256, Address, RwLock}; +use ids::BlockId; +use header::BlockNumber; +use client::{Client, BlockChainClient}; +use super::ValidatorSet; + +type BlockNumberLookup = Box Result + Send + Sync + 'static>; + +pub struct Multi { + sets: BTreeMap>, + block_number: RwLock, +} + +impl Multi { + pub fn new(set_map: BTreeMap>) -> Self { + assert!(set_map.get(&0u64).is_some(), "ValidatorSet has to be specified from block 0."); + Multi { + sets: set_map, + block_number: RwLock::new(Box::new(move |_| Err("No client!".into()))), + } + } + + fn correct_set(&self, bh: &H256) -> Option<&Box> { + match self + .block_number + .read()(bh) + .map(|parent_block| self + .sets + .iter() + .rev() + .find(|&(block, _)| *block <= parent_block + 1) + .expect("constructor validation ensures that there is at least one validator set for block 0; + block 0 is less than any uint; + qed") + ) { + Ok((block, set)) => { + trace!(target: "engine", "Multi ValidatorSet retrieved for block {}.", block); + Some(set) + }, + Err(e) => { + debug!(target: "engine", "ValidatorSet could not be recovered: {}", e); + None + }, + } + } +} + +impl ValidatorSet for Multi { + fn contains(&self, bh: &H256, address: &Address) -> bool { + self.correct_set(bh).map_or(false, |set| set.contains(bh, address)) + } + + fn get(&self, bh: &H256, nonce: usize) -> Address { + self.correct_set(bh).map_or_else(Default::default, |set| set.get(bh, nonce)) + } + + fn count(&self, bh: &H256) -> usize { + self.correct_set(bh).map_or_else(usize::max_value, |set| set.count(bh)) + } + + fn report_malicious(&self, validator: &Address) { + for set in self.sets.values() { + set.report_malicious(validator); + } + } + + fn report_benign(&self, validator: &Address) { + for set in self.sets.values() { + set.report_benign(validator); + } + } + + fn register_contract(&self, client: Weak) { + for set in self.sets.values() { + set.register_contract(client.clone()); + } + *self.block_number.write() = Box::new(move |hash| client + .upgrade() + .ok_or("No client!".into()) + .and_then(|c| c.block_number(BlockId::Hash(*hash)).ok_or("Unknown block".into()))); + } +} + +#[cfg(test)] +mod tests { + use util::*; + use types::ids::BlockId; + use spec::Spec; + use account_provider::AccountProvider; + use client::{BlockChainClient, EngineClient}; + use ethkey::Secret; + use miner::MinerService; + use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data}; + + #[test] + fn uses_current_set() { + ::env_logger::init().unwrap(); + let tap = Arc::new(AccountProvider::transient_provider()); + let s0 = Secret::from_slice(&"0".sha3()).unwrap(); + let v0 = tap.insert_account(s0.clone(), "").unwrap(); + let v1 = tap.insert_account(Secret::from_slice(&"1".sha3()).unwrap(), "").unwrap(); + let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap)); + client.engine().register_client(Arc::downgrade(&client)); + + // Make sure txs go through. + client.miner().set_gas_floor_target(1_000_000.into()); + + // Wrong signer for the first block. + client.miner().set_engine_signer(v1, "".into()).unwrap(); + client.transact_contract(Default::default(), Default::default()).unwrap(); + client.update_sealing(); + assert_eq!(client.chain_info().best_block_number, 0); + // Right signer for the first block. 
+ client.miner().set_engine_signer(v0, "".into()).unwrap(); + client.update_sealing(); + assert_eq!(client.chain_info().best_block_number, 1); + // This time v0 is wrong. + client.transact_contract(Default::default(), Default::default()).unwrap(); + client.update_sealing(); + assert_eq!(client.chain_info().best_block_number, 1); + client.miner().set_engine_signer(v1, "".into()).unwrap(); + client.update_sealing(); + assert_eq!(client.chain_info().best_block_number, 2); + // v1 is still good. + client.transact_contract(Default::default(), Default::default()).unwrap(); + client.update_sealing(); + assert_eq!(client.chain_info().best_block_number, 3); + + // Check syncing. + let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]); + sync_client.engine().register_client(Arc::downgrade(&sync_client)); + for i in 1..4 { + sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap(); + } + sync_client.flush_queue(); + assert_eq!(sync_client.chain_info().best_block_number, 3); + } +} diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 21c07c9a3..455d0745f 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -360,6 +360,10 @@ impl Spec { /// Account is marked with `reportBenign` it can be checked as disliked with "0xd8f2e0bf". /// Validator can be removed with `reportMalicious`. pub fn new_validator_contract() -> Self { load_bundled!("validator_contract") } + + /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with height. + /// Account with secrets "0".sha3() is the validator for block 1 and with "1".sha3() onwards. + pub fn new_validator_multi() -> Self { load_bundled!("validator_multi") } } #[cfg(test)] diff --git a/json/src/spec/validator_set.rs b/json/src/spec/validator_set.rs index 080a36c50..f433caa03 100644 --- a/json/src/spec/validator_set.rs +++ b/json/src/spec/validator_set.rs @@ -16,6 +16,8 @@ //! Validator set deserialization. +use std::collections::BTreeMap; +use uint::Uint; use hash::Address; /// Different ways of specifying validators. @@ -30,6 +32,9 @@ pub enum ValidatorSet { /// Address of a contract that indicates the list of authorities and enables reporting of theor misbehaviour using transactions. #[serde(rename="contract")] Contract(Address), + /// A map of starting blocks for each validator set. + #[serde(rename="multi")] + Multi(BTreeMap), } #[cfg(test)] @@ -40,11 +45,17 @@ mod tests { #[test] fn validator_set_deserialization() { let s = r#"[{ - "list" : ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] + "list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] }, { - "safeContract" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + "safeContract": "0xc6d9d2cd449a754c494264e1809c50e34d64562b" }, { - "contract" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + "contract": "0xc6d9d2cd449a754c494264e1809c50e34d64562b" + }, { + "multi": { + "0": { "list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] }, + "10": { "list": ["0xd6d9d2cd449a754c494264e1809c50e34d64562b"] }, + "20": { "contract": "0xc6d9d2cd449a754c494264e1809c50e34d64562b" } + } }]"#; let _deserialized: Vec = serde_json::from_str(s).unwrap(); From bb1bbebfd63b5870478ff4bcd60d4a1ac1e00d36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 23 Mar 2017 13:23:03 +0100 Subject: [PATCH 10/11] Export account RPC (#4967) * Export account RPC * Removing GethDirectory and ParityDirectory * Updating ethstore-cli help. 
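For context, the new export path can be exercised directly on the account provider. A rough sketch only: the return and error types are assumed from the re-exports visible in the diff below, and the JSON-RPC wrapper in rpc/src/v1/impls/parity_accounts.rs exposes the same call.

	use ethcore::account_provider::{AccountProvider, Error, KeyFile};
	use ethstore::ethkey::Address;

	/// Fetch the account's encrypted JSON keyfile. The signature follows the
	/// `export_account` addition below; return and error types are assumed.
	fn export_keyfile(provider: &AccountProvider, address: &Address, password: String)
		-> Result<KeyFile, Error>
	{
		provider.export_account(address, password)
	}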
--- ethcore/src/account_provider/mod.rs | 13 ++- ethstore/src/account/safe_account.rs | 14 +++ ethstore/src/bin/ethstore.rs | 18 ++-- ethstore/src/dir/geth.rs | 102 --------------------- ethstore/src/dir/memory.rs | 1 + ethstore/src/dir/mod.rs | 14 +-- ethstore/src/dir/parity.rs | 81 ---------------- ethstore/src/dir/paths.rs | 96 +++++++++++++++++++ ethstore/src/error.rs | 17 ++++ ethstore/src/ethstore.rs | 99 +++++++++++--------- ethstore/src/import.rs | 21 +---- ethstore/src/json/key_file.rs | 22 ++++- ethstore/src/json/mod.rs | 2 +- ethstore/src/lib.rs | 8 +- ethstore/src/presale.rs | 3 + ethstore/src/secret_store.rs | 28 +++++- rpc/src/v1/impls/parity_accounts.rs | 12 +++ rpc/src/v1/tests/mocked/parity_accounts.rs | 27 ++++++ rpc/src/v1/traits/parity_accounts.rs | 5 + 19 files changed, 318 insertions(+), 265 deletions(-) delete mode 100755 ethstore/src/dir/geth.rs delete mode 100755 ethstore/src/dir/parity.rs create mode 100644 ethstore/src/dir/paths.rs diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index f9b7727db..0ecbf3b17 100755 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -24,14 +24,16 @@ use std::fmt; use std::collections::{HashMap, HashSet}; use std::time::{Instant, Duration}; use util::{RwLock}; -use ethstore::{SimpleSecretStore, SecretStore, Error as SSError, EthStore, EthMultiStore, - random_string, SecretVaultRef, StoreAccountRef}; +use ethstore::{ + SimpleSecretStore, SecretStore, Error as SSError, EthStore, EthMultiStore, + random_string, SecretVaultRef, StoreAccountRef, +}; use ethstore::dir::MemoryDirectory; use ethstore::ethkey::{Address, Message, Public, Secret, Random, Generator}; use ethjson::misc::AccountMeta; use hardware_wallet::{Error as HardwareError, HardwareWalletManager, KeyPath}; pub use ethstore::ethkey::Signature; -pub use ethstore::{Derivation, IndexDerivation}; +pub use ethstore::{Derivation, IndexDerivation, KeyFile}; /// Type of unlock. #[derive(Clone)] @@ -500,6 +502,11 @@ impl AccountProvider { self.sstore.change_password(&self.sstore.account_ref(address)?, &password, &new_password) } + /// Exports an account for given address. + pub fn export_account(&self, address: &Address, password: String) -> Result { + self.sstore.export_account(&self.sstore.account_ref(address)?, &password) + } + /// Helper method used for unlocking accounts. fn unlock_account(&self, address: Address, password: String, unlock: Unlock) -> Result<(), Error> { // verify password by signing dump message diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index d628b56ac..e0512fe8d 100755 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -19,14 +19,22 @@ use {json, Error, crypto}; use account::Version; use super::crypto::Crypto; +/// Account representation. #[derive(Debug, PartialEq, Clone)] pub struct SafeAccount { + /// Account ID pub id: [u8; 16], + /// Account version pub version: Version, + /// Account address pub address: Address, + /// Account private key derivation definition. pub crypto: Crypto, + /// Account filename pub filename: Option, + /// Account name pub name: String, + /// Account metadata pub meta: String, } @@ -44,6 +52,7 @@ impl Into for SafeAccount { } impl SafeAccount { + /// Create a new account pub fn create( keypair: &KeyPair, id: [u8; 16], @@ -114,21 +123,25 @@ impl SafeAccount { }) } + /// Sign a message. 
pub fn sign(&self, password: &str, message: &Message) -> Result { let secret = self.crypto.secret(password)?; sign(&secret, message).map_err(From::from) } + /// Decrypt a message. pub fn decrypt(&self, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { let secret = self.crypto.secret(password)?; crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) } + /// Derive public key. pub fn public(&self, password: &str) -> Result { let secret = self.crypto.secret(password)?; Ok(KeyPair::from_secret(secret)?.public().clone()) } + /// Change account's password. pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result { let secret = self.crypto.secret(old_password)?; let result = SafeAccount { @@ -143,6 +156,7 @@ impl SafeAccount { Ok(result) } + /// Check if password matches the account. pub fn check_password(&self, password: &str) -> bool { self.crypto.secret(password).is_ok() } diff --git a/ethstore/src/bin/ethstore.rs b/ethstore/src/bin/ethstore.rs index 20411a629..3e8df3a35 100644 --- a/ethstore/src/bin/ethstore.rs +++ b/ethstore/src/bin/ethstore.rs @@ -22,7 +22,7 @@ use std::{env, process, fs}; use std::io::Read; use docopt::Docopt; use ethstore::ethkey::Address; -use ethstore::dir::{KeyDirectory, ParityDirectory, RootDiskDirectory, GethDirectory, DirectoryType}; +use ethstore::dir::{paths, KeyDirectory, RootDiskDirectory}; use ethstore::{EthStore, SimpleSecretStore, SecretStore, import_accounts, Error, PresaleWallet, SecretVaultRef, StoreAccountRef}; @@ -49,14 +49,14 @@ Usage: Options: -h, --help Display this message and exit. --dir DIR Specify the secret store directory. It may be either - parity, parity-test, geth, geth-test + parity, parity-(chain), geth, geth-test or a path [default: parity]. --vault VAULT Specify vault to use in this operation. --vault-pwd VAULTPWD Specify vault password to use in this operation. Please note that this option is required when vault option is set. Otherwise it is ignored. --src DIR Specify import source. It may be either - parity, parity-test, get, geth-test + parity, parity-(chain), get, geth-test or a path [default: geth]. Commands: @@ -116,10 +116,13 @@ fn main() { fn key_dir(location: &str) -> Result, Error> { let dir: Box = match location { - "parity" => Box::new(ParityDirectory::create(DirectoryType::Main)?), - "parity-test" => Box::new(ParityDirectory::create(DirectoryType::Testnet)?), - "geth" => Box::new(GethDirectory::create(DirectoryType::Main)?), - "geth-test" => Box::new(GethDirectory::create(DirectoryType::Testnet)?), + "geth" => Box::new(RootDiskDirectory::create(paths::geth(false))?), + "geth-test" => Box::new(RootDiskDirectory::create(paths::geth(true))?), + path if path.starts_with("parity") => { + let chain = path.split('-').nth(1).unwrap_or("ethereum"); + let path = paths::parity(chain); + Box::new(RootDiskDirectory::create(path)?) + }, path => Box::new(RootDiskDirectory::create(path)?), }; @@ -254,4 +257,3 @@ fn execute(command: I) -> Result where I: IntoIterator. 
- -use std::env; -use std::path::PathBuf; -use {SafeAccount, Error}; -use super::{KeyDirectory, RootDiskDirectory, DirectoryType}; - -#[cfg(target_os = "macos")] -fn geth_dir_path() -> PathBuf { - let mut home = env::home_dir().expect("Failed to get home dir"); - home.push("Library"); - home.push("Ethereum"); - home -} - -#[cfg(windows)] -/// Default path for ethereum installation on Windows -pub fn geth_dir_path() -> PathBuf { - let mut home = env::home_dir().expect("Failed to get home dir"); - home.push("AppData"); - home.push("Roaming"); - home.push("Ethereum"); - home -} - -#[cfg(not(any(target_os = "macos", windows)))] -/// Default path for ethereum installation on posix system which is not Mac OS -pub fn geth_dir_path() -> PathBuf { - let mut home = env::home_dir().expect("Failed to get home dir"); - home.push(".ethereum"); - home -} - -fn geth_keystore(t: DirectoryType) -> PathBuf { - let mut dir = geth_dir_path(); - match t { - DirectoryType::Testnet => { - dir.push("testnet"); - dir.push("keystore"); - }, - DirectoryType::Main => { - dir.push("keystore"); - } - } - dir -} - -pub struct GethDirectory { - dir: RootDiskDirectory, -} - -impl GethDirectory { - pub fn create(t: DirectoryType) -> Result { - let result = GethDirectory { - dir: RootDiskDirectory::create(geth_keystore(t))?, - }; - - Ok(result) - } - - pub fn open(t: DirectoryType) -> Self { - GethDirectory { - dir: RootDiskDirectory::at(geth_keystore(t)), - } - } -} - -impl KeyDirectory for GethDirectory { - fn load(&self) -> Result, Error> { - self.dir.load() - } - - fn insert(&self, account: SafeAccount) -> Result { - self.dir.insert(account) - } - - fn update(&self, account: SafeAccount) -> Result { - self.dir.update(account) - } - - fn remove(&self, account: &SafeAccount) -> Result<(), Error> { - self.dir.remove(account) - } - - fn unique_repr(&self) -> Result { - self.dir.unique_repr() - } -} diff --git a/ethstore/src/dir/memory.rs b/ethstore/src/dir/memory.rs index 955afc5b0..b8c2ad9ff 100644 --- a/ethstore/src/dir/memory.rs +++ b/ethstore/src/dir/memory.rs @@ -22,6 +22,7 @@ use ethkey::Address; use {SafeAccount, Error}; use super::KeyDirectory; +/// Accounts in-memory storage. #[derive(Default)] pub struct MemoryDirectory { accounts: RwLock>>, diff --git a/ethstore/src/dir/mod.rs b/ethstore/src/dir/mod.rs index 83e978707..fb22c06ee 100755 --- a/ethstore/src/dir/mod.rs +++ b/ethstore/src/dir/mod.rs @@ -14,19 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! 
Accounts Directory + use std::path::{PathBuf}; use {SafeAccount, Error}; mod disk; -mod geth; mod memory; -mod parity; mod vault; - -pub enum DirectoryType { - Testnet, - Main, -} +pub mod paths; /// `VaultKeyDirectory::set_key` error #[derive(Debug)] @@ -54,7 +50,7 @@ pub trait KeyDirectory: Send + Sync { fn load(&self) -> Result, Error>; /// Insert new key to directory fn insert(&self, account: SafeAccount) -> Result; - //// Update key in directory + /// Update key in the directory fn update(&self, account: SafeAccount) -> Result; /// Remove key from directory fn remove(&self, account: &SafeAccount) -> Result<(), Error>; @@ -95,9 +91,7 @@ pub trait VaultKeyDirectory: KeyDirectory { } pub use self::disk::RootDiskDirectory; -pub use self::geth::GethDirectory; pub use self::memory::MemoryDirectory; -pub use self::parity::ParityDirectory; pub use self::vault::VaultDiskDirectory; impl VaultKey { diff --git a/ethstore/src/dir/parity.rs b/ethstore/src/dir/parity.rs deleted file mode 100755 index df03260d3..000000000 --- a/ethstore/src/dir/parity.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::env; -use std::path::PathBuf; -use {SafeAccount, Error}; -use super::{KeyDirectory, RootDiskDirectory, DirectoryType}; - -fn parity_dir_path() -> PathBuf { - let mut home = env::home_dir().expect("Failed to get home dir"); - home.push(".parity"); - home -} - -fn parity_keystore(t: DirectoryType) -> PathBuf { - let mut dir = parity_dir_path(); - match t { - DirectoryType::Testnet => { - dir.push("testnet_keys"); - }, - DirectoryType::Main => { - dir.push("keys"); - } - } - dir -} - -pub struct ParityDirectory { - dir: RootDiskDirectory, -} - -impl ParityDirectory { - pub fn create(t: DirectoryType) -> Result { - let result = ParityDirectory { - dir: RootDiskDirectory::create(parity_keystore(t))?, - }; - - Ok(result) - } - - pub fn open(t: DirectoryType) -> Self { - ParityDirectory { - dir: RootDiskDirectory::at(parity_keystore(t)), - } - } -} - -impl KeyDirectory for ParityDirectory { - fn load(&self) -> Result, Error> { - self.dir.load() - } - - fn insert(&self, account: SafeAccount) -> Result { - self.dir.insert(account) - } - - fn update(&self, account: SafeAccount) -> Result { - self.dir.update(account) - } - - fn remove(&self, account: &SafeAccount) -> Result<(), Error> { - self.dir.remove(account) - } - - fn unique_repr(&self) -> Result { - self.dir.unique_repr() - } -} diff --git a/ethstore/src/dir/paths.rs b/ethstore/src/dir/paths.rs new file mode 100644 index 000000000..db3178cff --- /dev/null +++ b/ethstore/src/dir/paths.rs @@ -0,0 +1,96 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Common tools paths. + +use std::env; +use std::path::PathBuf; + +fn home() -> PathBuf { + env::home_dir().expect("Failed to get home dir") +} + +/// Geth path +pub fn geth(testnet: bool) -> PathBuf { + let mut base = geth_base(); + if testnet { + base.push("testnet"); + } + base.push("keystore"); + base +} + +/// Parity path for specific chain +pub fn parity(chain: &str) -> PathBuf { + let mut base = parity_base(); + base.push(chain); + base +} + +#[cfg(target_os = "macos")] +fn parity_base() -> PathBuf { + let mut home = home(); + home.push("Library"); + home.push("Application Support"); + home.push("io.parity.ethereum"); + home.push("keys"); + home +} + +#[cfg(windows)] +fn parity_base() -> PathBuf { + let mut home = home(); + home.push("AppData"); + home.push("Roaming"); + home.push("Parity"); + home.push("Ethereum"); + home.push("keys"); + home +} + +#[cfg(not(any(target_os = "macos", windows)))] +fn parity_base() -> PathBuf { + let mut home = home(); + home.push(".local"); + home.push("share"); + home.push("io.parity.ethereum"); + home.push("keys"); + home +} + +#[cfg(target_os = "macos")] +fn geth_base() -> PathBuf { + let mut home = home(); + home.push("Library"); + home.push("Ethereum"); + home +} + +#[cfg(windows)] +fn geth_base() -> PathBuf { + let mut home = home(); + home.push("AppData"); + home.push("Roaming"); + home.push("Ethereum"); + home +} + +#[cfg(not(any(target_os = "macos", windows)))] +fn geth_base() -> PathBuf { + let mut home = home(); + home.push(".ethereum"); + home +} diff --git a/ethstore/src/error.rs b/ethstore/src/error.rs index 8a2eb5e8b..f7e0b0bfa 100755 --- a/ethstore/src/error.rs +++ b/ethstore/src/error.rs @@ -20,23 +20,40 @@ use ethkey::Error as EthKeyError; use crypto::Error as EthCryptoError; use ethkey::DerivationError; +/// Account-related errors. #[derive(Debug)] pub enum Error { + /// IO error Io(IoError), + /// Invalid Password InvalidPassword, + /// Account's secret is invalid. InvalidSecret, + /// Invalid Vault Crypto meta. InvalidCryptoMeta, + /// Invalid Account. InvalidAccount, + /// Invalid Message. InvalidMessage, + /// Invalid Key File InvalidKeyFile(String), + /// Vaults are not supported. VaultsAreNotSupported, + /// Unsupported vault UnsupportedVault, + /// Invalid vault name InvalidVaultName, + /// Vault not found VaultNotFound, + /// Account creation failed. 
CreationFailed, + /// `EthKey` error EthKey(EthKeyError), + /// `EthCrypto` error EthCrypto(EthCryptoError), + /// Derivation error Derivation(DerivationError), + /// Custom error Custom(String), } diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index cacb6054f..5fb76791e 100755 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -25,18 +25,21 @@ use ethkey::{self, Signature, Address, Message, Secret, Public, KeyPair, Extende use dir::{KeyDirectory, VaultKeyDirectory, VaultKey, SetKeyError}; use account::SafeAccount; use presale::PresaleWallet; -use json::{self, Uuid}; +use json::{self, Uuid, OpaqueKeyFile}; use {import, Error, SimpleSecretStore, SecretStore, SecretVaultRef, StoreAccountRef, Derivation}; +/// Accounts store. pub struct EthStore { store: EthMultiStore, } impl EthStore { + /// Open a new accounts store with given key directory backend. pub fn open(directory: Box) -> Result { Self::open_with_iterations(directory, KEY_ITERATIONS as u32) } + /// Open a new account store with given key directory backend and custom number of iterations. pub fn open_with_iterations(directory: Box, iterations: u32) -> Result { Ok(EthStore { store: EthMultiStore::open_with_iterations(directory, iterations)?, @@ -44,7 +47,7 @@ impl EthStore { } fn get(&self, account: &StoreAccountRef) -> Result { - let mut accounts = self.store.get(account)?.into_iter(); + let mut accounts = self.store.get_accounts(account)?.into_iter(); accounts.next().ok_or(Error::InvalidAccount) } } @@ -76,6 +79,10 @@ impl SimpleSecretStore for EthStore { self.store.change_password(account, old_password, new_password) } + fn export_account(&self, account: &StoreAccountRef, password: &str) -> Result { + self.store.export_account(account, password) + } + fn remove_account(&self, account: &StoreAccountRef, password: &str) -> Result<(), Error> { self.store.remove_account(account, password) } @@ -234,11 +241,12 @@ pub struct EthMultiStore { } impl EthMultiStore { - + /// Open new multi-accounts store with given key directory backend. pub fn open(directory: Box) -> Result { Self::open_with_iterations(directory, KEY_ITERATIONS as u32) } + /// Open new multi-accounts store with given key directory backend and custom number of iterations for new keys. 
pub fn open_with_iterations(directory: Box, iterations: u32) -> Result { let store = EthMultiStore { dir: directory, @@ -259,7 +267,7 @@ impl EthMultiStore { } self.reload_accounts()?; *last_dir_hash = dir_hash; - Ok(()) + Ok(()) } fn reload_accounts(&self) -> Result<(), Error> { @@ -287,7 +295,7 @@ impl EthMultiStore { Ok(()) } - fn get(&self, account: &StoreAccountRef) -> Result, Error> { + fn get_accounts(&self, account: &StoreAccountRef) -> Result, Error> { { let cache = self.cache.read(); if let Some(accounts) = cache.get(account) { @@ -307,6 +315,15 @@ impl EthMultiStore { } } + fn get_matching(&self, account: &StoreAccountRef, password: &str) -> Result, Error> { + let accounts = self.get_accounts(account)?; + + Ok(accounts.into_iter() + .filter(|acc| acc.check_password(password)) + .collect() + ) + } + fn import(&self, vault: SecretVaultRef, account: SafeAccount) -> Result { // save to file let account = match vault { @@ -398,12 +415,8 @@ impl SimpleSecretStore for EthMultiStore { fn insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &str, derivation: Derivation) -> Result { - let accounts = self.get(account_ref)?; + let accounts = self.get_matching(account_ref, password)?; for account in accounts { - // Skip if password is invalid - if !account.check_password(password) { - continue; - } let extended = self.generate(account.crypto.secret(password)?, derivation)?; return self.insert_account(vault, extended.secret().as_raw().clone(), password); } @@ -413,14 +426,9 @@ impl SimpleSecretStore for EthMultiStore { fn generate_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation) -> Result { - let accounts = self.get(&account_ref)?; + let accounts = self.get_matching(&account_ref, password)?; for account in accounts { - // Skip if password is invalid - if !account.check_password(password) { - continue; - } let extended = self.generate(account.crypto.secret(password)?, derivation)?; - return Ok(ethkey::public_to_address(extended.public().public())); } Err(Error::InvalidPassword) @@ -429,18 +437,13 @@ impl SimpleSecretStore for EthMultiStore { fn sign_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation, message: &Message) -> Result { - let accounts = self.get(&account_ref)?; + let accounts = self.get_matching(&account_ref, password)?; for account in accounts { - // Skip if password is invalid - if !account.check_password(password) { - continue; - } let extended = self.generate(account.crypto.secret(password)?, derivation)?; let secret = extended.secret().as_raw(); return Ok(ethkey::sign(&secret, message)?) 
} Err(Error::InvalidPassword) - } fn account_ref(&self, address: &Address) -> Result { @@ -457,47 +460,47 @@ impl SimpleSecretStore for EthMultiStore { } fn remove_account(&self, account_ref: &StoreAccountRef, password: &str) -> Result<(), Error> { - let accounts = self.get(account_ref)?; + let accounts = self.get_matching(account_ref, password)?; for account in accounts { - // Skip if password is invalid - if !account.check_password(password) { - continue; - } - return self.remove_safe_account(account_ref, &account); } + Err(Error::InvalidPassword) } fn change_password(&self, account_ref: &StoreAccountRef, old_password: &str, new_password: &str) -> Result<(), Error> { - let accounts = self.get(account_ref)?; + let accounts = self.get_matching(account_ref, old_password)?; + + if accounts.is_empty() { + return Err(Error::InvalidPassword); + } for account in accounts { // Change password let new_account = account.change_password(old_password, new_password, self.iterations)?; self.update(account_ref, account, new_account)?; } + Ok(()) } - fn sign(&self, account: &StoreAccountRef, password: &str, message: &Message) -> Result { - let accounts = self.get(account)?; - for account in accounts { - if account.check_password(password) { - return account.sign(password, message); - } - } + fn export_account(&self, account_ref: &StoreAccountRef, password: &str) -> Result { + self.get_matching(account_ref, password)?.into_iter().nth(0).map(Into::into).ok_or(Error::InvalidPassword) + } + fn sign(&self, account: &StoreAccountRef, password: &str, message: &Message) -> Result { + let accounts = self.get_matching(account, password)?; + for account in accounts { + return account.sign(password, message); + } Err(Error::InvalidPassword) } fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { - let accounts = self.get(account)?; + let accounts = self.get_matching(account, password)?; for account in accounts { - if account.check_password(password) { - return account.decrypt(password, shared_mac, message); - } + return account.decrypt(password, shared_mac, message); } Err(Error::InvalidPassword) } @@ -586,7 +589,7 @@ impl SimpleSecretStore for EthMultiStore { return Ok(account_ref); } - let account = self.get(&account_ref)?.into_iter().nth(0).ok_or(Error::InvalidAccount)?; + let account = self.get_accounts(&account_ref)?.into_iter().nth(0).ok_or(Error::InvalidAccount)?; let new_account_ref = self.import(vault, account.clone())?; self.remove_safe_account(&account_ref, &account)?; self.reload_accounts()?; @@ -1032,4 +1035,18 @@ mod tests { // then assert_eq!(store.get_vault_meta(name).unwrap(), "OldMeta".to_owned()); } + + #[test] + fn should_export_account() { + // given + let store = store(); + let keypair = keypair(); + let address = store.insert_account(SecretVaultRef::Root, keypair.secret().clone(), "test").unwrap(); + + // when + let exported = store.export_account(&address, "test"); + + // then + assert!(exported.is_ok(), "Should export single account: {:?}", exported); + } } diff --git a/ethstore/src/import.rs b/ethstore/src/import.rs index 0090631bd..b7497c9ff 100644 --- a/ethstore/src/import.rs +++ b/ethstore/src/import.rs @@ -16,9 +16,10 @@ use std::collections::HashSet; use ethkey::Address; -use dir::{GethDirectory, KeyDirectory, DirectoryType}; +use dir::{paths, KeyDirectory, RootDiskDirectory}; use Error; +/// Import all accounts from one directory to the other. 
pub fn import_accounts(src: &KeyDirectory, dst: &KeyDirectory) -> Result, Error> { let accounts = src.load()?; let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::>(); @@ -34,27 +35,15 @@ pub fn import_accounts(src: &KeyDirectory, dst: &KeyDirectory) -> Result Vec
{ - let t = if testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - GethDirectory::open(t) + RootDiskDirectory::at(paths::geth(testnet)) .load() .map(|d| d.into_iter().map(|a| a.address).collect()) .unwrap_or_else(|_| Vec::new()) } -/// Import specific `desired` accounts from the Geth keystore into `dst`. +/// Import specific `desired` accounts from the Geth keystore into `dst`. pub fn import_geth_accounts(dst: &KeyDirectory, desired: HashSet
, testnet: bool) -> Result, Error> { - let t = if testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - let src = GethDirectory::open(t); + let src = RootDiskDirectory::at(paths::geth(testnet)); let accounts = src.load()?; let existing_accounts = dst.load()?.into_iter().map(|a| a.address).collect::>(); diff --git a/ethstore/src/json/key_file.rs b/ethstore/src/json/key_file.rs index 21711df8f..a1c20acf2 100644 --- a/ethstore/src/json/key_file.rs +++ b/ethstore/src/json/key_file.rs @@ -16,11 +16,31 @@ use std::fmt; use std::io::{Read, Write}; -use serde::{Deserialize, Deserializer}; +use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::de::{Error, Visitor, MapVisitor}; use serde_json; use super::{Uuid, Version, Crypto, H160}; +/// Public opaque type representing serializable `KeyFile`. +#[derive(Debug, PartialEq)] +pub struct OpaqueKeyFile { + key_file: KeyFile +} + +impl Serialize for OpaqueKeyFile { + fn serialize(&self, serializer: S) -> Result where + S: Serializer, + { + self.key_file.serialize(serializer) + } +} + +impl From for OpaqueKeyFile where T: Into { + fn from(val: T) -> Self { + OpaqueKeyFile { key_file: val.into() } + } +} + #[derive(Debug, PartialEq, Serialize)] pub struct KeyFile { pub id: Uuid, diff --git a/ethstore/src/json/mod.rs b/ethstore/src/json/mod.rs index 98033effd..865b75dea 100644 --- a/ethstore/src/json/mod.rs +++ b/ethstore/src/json/mod.rs @@ -36,7 +36,7 @@ pub use self::error::Error; pub use self::hash::{H128, H160, H256}; pub use self::id::Uuid; pub use self::kdf::{Kdf, KdfSer, Prf, Pbkdf2, Scrypt, KdfSerParams}; -pub use self::key_file::KeyFile; +pub use self::key_file::{KeyFile, OpaqueKeyFile}; pub use self::presale::{PresaleWallet, Encseed}; pub use self::vault_file::VaultFile; pub use self::vault_key_file::{VaultKeyFile, VaultKeyMeta, insert_vault_name_to_json_meta, remove_vault_name_from_json_meta}; diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index f092c3fe6..8203feeec 100755 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -14,6 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Ethereum key-management. + + +#![warn(missing_docs)] + extern crate libc; extern crate itertools; extern crate smallvec; @@ -52,10 +57,11 @@ mod presale; mod random; mod secret_store; -pub use self::account::{SafeAccount}; +pub use self::account::SafeAccount; pub use self::error::Error; pub use self::ethstore::{EthStore, EthMultiStore}; pub use self::import::{import_accounts, read_geth_accounts}; +pub use self::json::OpaqueKeyFile as KeyFile; pub use self::presale::PresaleWallet; pub use self::secret_store::{ SecretVaultRef, StoreAccountRef, SimpleSecretStore, SecretStore, diff --git a/ethstore/src/presale.rs b/ethstore/src/presale.rs index 45d127664..dbbdcdc8d 100644 --- a/ethstore/src/presale.rs +++ b/ethstore/src/presale.rs @@ -8,6 +8,7 @@ use ethkey::{Address, Secret, KeyPair}; use crypto::Keccak256; use {crypto, Error}; +/// Pre-sale wallet. pub struct PresaleWallet { iv: [u8; 16], ciphertext: Vec, @@ -31,6 +32,7 @@ impl From for PresaleWallet { } impl PresaleWallet { + /// Open a pre-sale wallet. pub fn open
<P>
(path: P) -> Result where P: AsRef { let file = fs::File::open(path)?; let presale = json::PresaleWallet::load(file) @@ -38,6 +40,7 @@ impl PresaleWallet { Ok(PresaleWallet::from(presale)) } + /// Decrypt the wallet. pub fn decrypt(&self, password: &str) -> Result { let mut h_mac = Hmac::new(Sha256::new(), password.as_bytes()); let mut derived_key = vec![0u8; 16]; diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 1eff95335..fd7eea50d 100755 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -18,7 +18,7 @@ use std::hash::{Hash, Hasher}; use std::path::PathBuf; use ethkey::{Address, Message, Signature, Secret, Public}; use Error; -use json::Uuid; +use json::{Uuid, OpaqueKeyFile}; use util::H256; /// Key directory reference @@ -39,16 +39,28 @@ pub struct StoreAccountRef { pub address: Address, } +/// Simple Secret Store API pub trait SimpleSecretStore: Send + Sync { + /// Inserts new accounts to the store (or vault) with given password. fn insert_account(&self, vault: SecretVaultRef, secret: Secret, password: &str) -> Result; + /// Inserts new derived account to the store (or vault) with given password. fn insert_derived(&self, vault: SecretVaultRef, account_ref: &StoreAccountRef, password: &str, derivation: Derivation) -> Result; + /// Changes accounts password. fn change_password(&self, account: &StoreAccountRef, old_password: &str, new_password: &str) -> Result<(), Error>; + /// Exports key details for account. + fn export_account(&self, account: &StoreAccountRef, password: &str) -> Result; + /// Entirely removes account from the store and underlying storage. fn remove_account(&self, account: &StoreAccountRef, password: &str) -> Result<(), Error>; + /// Generates new derived account. fn generate_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation) -> Result; + /// Sign a message with given account. fn sign(&self, account: &StoreAccountRef, password: &str, message: &Message) -> Result; + /// Sign a message with derived account. fn sign_derived(&self, account_ref: &StoreAccountRef, password: &str, derivation: Derivation, message: &Message) -> Result; + /// Decrypt a messages with given account. fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; + /// Returns all accounts in this secret store. fn accounts(&self) -> Result, Error>; /// Get reference to some account with given address. /// This method could be removed if we will guarantee that there is max(1) account for given address. @@ -74,23 +86,37 @@ pub trait SimpleSecretStore: Send + Sync { fn set_vault_meta(&self, name: &str, meta: &str) -> Result<(), Error>; } +/// Secret Store API pub trait SecretStore: SimpleSecretStore { + /// Imports presale wallet fn import_presale(&self, vault: SecretVaultRef, json: &[u8], password: &str) -> Result; + /// Imports existing JSON wallet fn import_wallet(&self, vault: SecretVaultRef, json: &[u8], password: &str) -> Result; + /// Copies account between stores and vaults. fn copy_account(&self, new_store: &SimpleSecretStore, new_vault: SecretVaultRef, account: &StoreAccountRef, password: &str, new_password: &str) -> Result<(), Error>; + /// Checks if password matches given account. fn test_password(&self, account: &StoreAccountRef, password: &str) -> Result; + /// Returns a public key for given account. fn public(&self, account: &StoreAccountRef, password: &str) -> Result; + /// Returns uuid of an account. 
fn uuid(&self, account: &StoreAccountRef) -> Result; + /// Returns account's name. fn name(&self, account: &StoreAccountRef) -> Result; + /// Returns account's metadata. fn meta(&self, account: &StoreAccountRef) -> Result; + /// Modifies account metadata. fn set_name(&self, account: &StoreAccountRef, name: String) -> Result<(), Error>; + /// Modifies account name. fn set_meta(&self, account: &StoreAccountRef, meta: String) -> Result<(), Error>; + /// Returns local path of the store. fn local_path(&self) -> PathBuf; + /// Lists all found geth accounts. fn list_geth_accounts(&self, testnet: bool) -> Vec
<Address>
; + /// Imports geth accounts to the store/vault. fn import_geth_accounts(&self, vault: SecretVaultRef, desired: Vec
, testnet: bool) -> Result, Error>; } diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index 60b615897..828dbf8f4 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -20,6 +20,7 @@ use std::collections::BTreeMap; use util::{Address}; use ethkey::{Brain, Generator, Secret}; +use ethstore::KeyFile; use ethcore::account_provider::AccountProvider; use jsonrpc_core::Error; @@ -315,6 +316,17 @@ impl ParityAccounts for ParityAccountsClient { .map(Into::into) .map_err(|e| errors::account("Could not derive account.", e)) } + + fn export_account(&self, addr: RpcH160, password: String) -> Result { + let addr = addr.into(); + take_weak!(self.accounts) + .export_account( + &addr, + password, + ) + .map(Into::into) + .map_err(|e| errors::account("Could not export account.", e)) + } } fn into_vec(a: Vec) -> Vec where diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index ae4f74b49..ef356cd42 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -472,3 +472,30 @@ fn derive_key_index() { let res = tester.io.handle_request_sync(&request); assert_eq!(res, Some(response.into())); } + + +#[test] +fn should_export_account() { + // given + let tester = setup(); + let wallet = r#"{"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","name":"parity-export-test","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}"}"#; + tester.accounts.import_wallet(wallet.as_bytes(), "parity-export-test").unwrap(); + let accounts = tester.accounts.accounts().unwrap(); + assert_eq!(accounts.len(), 1); + + // invalid password + let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","123"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32023,"message":"Could not export account.","data":"InvalidPassword"},"id":1}"#; + let res = tester.io.handle_request_sync(&request); + assert_eq!(res, Some(response.into())); + + // correct password + let request = r#"{"jsonrpc":"2.0","method":"parity_exportAccount","params":["0x0042e5d2a662eeaca8a7e828c174f98f35d8925b","parity-export-test"],"id":1}"#; + + let response = r#"{"jsonrpc":"2.0","result":{"address":"0042e5d2a662eeaca8a7e828c174f98f35d8925b","crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a1c6ff99070f8032ca1c4e8add006373"},"ciphertext":"df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815"},"mac":"3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d"},"id":"6a186c80-7797-cff2-bc2e-7c1d6a6cc76e","meta":"{\"passwordHint\":\"parity-export-test\",\"timestamp\":1490017814987}","name":"parity-export-test","version":3},"id":1}"#; + let result = tester.io.handle_request_sync(&request); + + println!("Result: {:?}", result); + println!("Response: {:?}", response); + assert_eq!(result, 
Some(response.into())); +} diff --git a/rpc/src/v1/traits/parity_accounts.rs b/rpc/src/v1/traits/parity_accounts.rs index a3a9a8d9f..46372560c 100644 --- a/rpc/src/v1/traits/parity_accounts.rs +++ b/rpc/src/v1/traits/parity_accounts.rs @@ -18,6 +18,7 @@ use std::collections::BTreeMap; use jsonrpc_core::Error; +use ethstore::KeyFile; use v1::types::{H160, H256, DappId, DeriveHash, DeriveHierarchical}; build_rpc_trait! { @@ -175,5 +176,9 @@ build_rpc_trait! { /// Resulting address can be either saved as a new account (with the same password). #[rpc(name = "parity_deriveAddressIndex")] fn derive_key_index(&self, H160, String, DeriveHierarchical, bool) -> Result; + + /// Exports an account with given address if provided password matches. + #[rpc(name = "parity_exportAccount")] + fn export_account(&self, H160, String) -> Result; } } From f5ea47a7b2533c7c534ff6b0c921e6940325f427 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 23 Mar 2017 13:25:31 +0100 Subject: [PATCH 11/11] Various installer and tray apps fixes (#4970) * Mac tray app fixes * Windows restarting fixed --- Cargo.lock | 1 + Cargo.toml | 1 + mac/Parity/AppDelegate.swift | 6 +---- parity/main.rs | 43 +++++++++++++++++++++++++++--------- 4 files changed, 35 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 99ebbe7e3..974206727 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,6 +52,7 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f0978f420..8420c5459 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" fdlimit = "0.1" +ws2_32-sys = "0.2" hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } diff --git a/mac/Parity/AppDelegate.swift b/mac/Parity/AppDelegate.swift index 3bf3bdd31..c017e79bd 100644 --- a/mac/Parity/AppDelegate.swift +++ b/mac/Parity/AppDelegate.swift @@ -26,7 +26,6 @@ class AppDelegate: NSObject, NSApplicationDelegate { let statusItem = NSStatusBar.system().statusItem(withLength: NSVariableStatusItemLength) var parityPid: Int32? 
= nil var commandLine: [String] = [] - let defaultConfig = "[network]\nwarp = true" let defaultDefaults = "{\"fat_db\":false,\"mode\":\"passive\",\"mode.alarm\":3600,\"mode.timeout\":300,\"pruning\":\"fast\",\"tracing\":false}" func menuAppPath() -> String { @@ -51,7 +50,7 @@ class AppDelegate: NSObject, NSApplicationDelegate { func killParity() { if let pid = self.parityPid { - kill(pid, SIGINT) + kill(pid, SIGKILL) } } @@ -81,9 +80,6 @@ class AppDelegate: NSObject, NSApplicationDelegate { } let configFile = basePath?.appendingPathComponent("config.toml") - if !FileManager.default.fileExists(atPath: configFile!.path) { - try defaultConfig.write(to: configFile!, atomically: false, encoding: String.Encoding.utf8) - } } catch {} } diff --git a/parity/main.rs b/parity/main.rs index 3e499d483..2044b3ee0 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -75,6 +75,9 @@ extern crate ethcore_secretstore; #[cfg(feature = "dapps")] extern crate ethcore_dapps; +#[cfg(windows)] extern crate ws2_32; +#[cfg(windows)] extern crate winapi; + macro_rules! dependency { ($dep_ty:ident, $url:expr) => { { @@ -123,7 +126,7 @@ mod stratum; use std::{process, env}; use std::collections::HashMap; use std::io::{self as stdio, BufReader, Read, Write}; -use std::fs::{remove_file, metadata, File}; +use std::fs::{remove_file, metadata, File, create_dir_all}; use std::path::PathBuf; use util::sha3::sha3; use cli::Args; @@ -210,10 +213,11 @@ fn latest_exe_path() -> Option { } fn set_spec_name_override(spec_name: String) { - if let Err(e) = File::create(updates_path("spec_name_overide")) - .and_then(|mut f| f.write_all(spec_name.as_bytes())) + if let Err(e) = create_dir_all(default_hypervisor_path()) + .and_then(|_| File::create(updates_path("spec_name_overide")) + .and_then(|mut f| f.write_all(spec_name.as_bytes()))) { - warn!("Couldn't override chain spec: {}", e); + warn!("Couldn't override chain spec: {} at {:?}", e, updates_path("spec_name_overide")); } } @@ -227,12 +231,24 @@ fn take_spec_name_override() -> Option { #[cfg(windows)] fn global_cleanup() { - extern "system" { pub fn WSACleanup() -> i32; } // We need to cleanup all sockets before spawning another Parity process. This makes shure everything is cleaned up. // The loop is required because of internal refernce counter for winsock dll. We don't know how many crates we use do // initialize it. There's at least 2 now. for _ in 0.. 10 { - unsafe { WSACleanup(); } + unsafe { ::ws2_32::WSACleanup(); } + } +} + +#[cfg(not(windows))] +fn global_init() {} + +#[cfg(windows)] +fn global_init() { + // When restarting in the same process this reinits windows sockets. + unsafe { + const WS_VERSION: u16 = 0x202; + let mut wsdata: ::winapi::winsock2::WSADATA = ::std::mem::zeroed(); + ::ws2_32::WSAStartup(WS_VERSION, &mut wsdata); } } @@ -241,15 +257,17 @@ fn global_cleanup() {} // Starts ~/.parity-updates/parity and returns the code it exits with. fn run_parity() -> Option { - global_cleanup(); + global_init(); use ::std::ffi::OsString; let prefix = vec![OsString::from("--can-restart"), OsString::from("--force-direct")]; - latest_exe_path().and_then(|exe| process::Command::new(exe) + let res = latest_exe_path().and_then(|exe| process::Command::new(exe) .args(&(env::args_os().skip(1).chain(prefix.into_iter()).collect::>())) .status() .map(|es| es.code().unwrap_or(128)) .ok() - ) + ); + global_cleanup(); + res } const PLEASE_RESTART_EXIT_CODE: i32 = 69; @@ -257,10 +275,11 @@ const PLEASE_RESTART_EXIT_CODE: i32 = 69; // Run our version of parity. 
// Returns the exit error code. fn main_direct(can_restart: bool) -> i32 { + global_init(); let mut alt_mains = HashMap::new(); sync_main(&mut alt_mains); stratum_main(&mut alt_mains); - if let Some(f) = std::env::args().nth(1).and_then(|arg| alt_mains.get(&arg.to_string())) { + let res = if let Some(f) = std::env::args().nth(1).and_then(|arg| alt_mains.get(&arg.to_string())) { f(); 0 } else { @@ -280,7 +299,9 @@ fn main_direct(can_restart: bool) -> i32 { 1 }, } - } + }; + global_cleanup(); + res } fn println_trace_main(s: String) {