From cf0b8b7681156e1ee5c92d844d1cd0b50b17c9db Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Thu, 11 May 2017 13:30:11 +0800 Subject: [PATCH 01/29] add several new tests for spec::account --- json/src/spec/account.rs | 57 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/json/src/spec/account.rs b/json/src/spec/account.rs index 046592a71..2c98fd3f1 100644 --- a/json/src/spec/account.rs +++ b/json/src/spec/account.rs @@ -39,7 +39,7 @@ pub struct Account { } impl Account { - /// Returns true if account does not have nonce and balance. + /// Returns true if account does not have nonce, balance, code and storage. pub fn is_empty(&self) -> bool { self.balance.is_none() && self.nonce.is_none() && self.code.is_none() && self.storage.is_none() } @@ -54,6 +54,59 @@ mod tests { use uint::Uint; use bytes::Bytes; + #[test] + fn account_balance_missing_not_empty() { + let s = r#"{ + "nonce": "0", + "code": "1234", + "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } + }"#; + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } + + #[test] + fn account_nonce_missing_not_empty() { + let s = r#"{ + "balance": "1", + "code": "1234", + "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } + }"#; + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } + + #[test] + fn account_code_missing_not_empty() { + let s = r#"{ + "balance": "1", + "nonce": "0", + "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } + }"#; + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } + + #[test] + fn account_storage_missing_not_empty() { + let s = r#"{ + "balance": "1", + "nonce": "0", + "code": "1234" + }"#; + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); + } + + #[test] + fn account_empty() { + let s = r#"{ + "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } + }"#; + let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(deserialized.is_empty()); + } + #[test] fn account_deserialization() { let s = r#"{ @@ -63,6 +116,7 @@ mod tests { "code": "1234" }"#; let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); @@ -78,6 +132,7 @@ mod tests { "storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" } }"#; let deserialized: Account = serde_json::from_str(s).unwrap(); + assert!(!deserialized.is_empty()); assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1))); assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0))); assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34])); From f90802498f20c32e46411a5bd661c79dc213999e Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Thu, 11 May 2017 14:05:56 +0800 Subject: [PATCH 02/29] add two asserts in unit tests for spec::authority_round --- json/src/spec/authority_round.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/json/src/spec/authority_round.rs b/json/src/spec/authority_round.rs index 99d138d00..83e029c56 100644 --- a/json/src/spec/authority_round.rs +++ b/json/src/spec/authority_round.rs @@ -60,6 +60,8 @@ pub struct AuthorityRound { #[cfg(test)] mod tests { + use uint::Uint; + use 
util::U256; use serde_json; use spec::authority_round::AuthorityRound; @@ -79,6 +81,8 @@ mod tests { } }"#; - let _deserialized: AuthorityRound = serde_json::from_str(s).unwrap(); + let deserialized: AuthorityRound = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400))); + assert_eq!(deserialized.params.step_duration, Uint(U256::from(0x02))); } } From a0dd77ca016516840012dc9279a4ea8a24463ee7 Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Fri, 12 May 2017 23:46:29 +0800 Subject: [PATCH 03/29] add asserts in spec::validatorset's unit test --- json/src/spec/validator_set.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/json/src/spec/validator_set.rs b/json/src/spec/validator_set.rs index f433caa03..abcb35848 100644 --- a/json/src/spec/validator_set.rs +++ b/json/src/spec/validator_set.rs @@ -40,6 +40,10 @@ pub enum ValidatorSet { #[cfg(test)] mod tests { use serde_json; + use uint::Uint; + use util::U256; + use hash::Address; + use util::hash::H160; use spec::validator_set::ValidatorSet; #[test] @@ -58,6 +62,20 @@ mod tests { } }]"#; - let _deserialized: Vec = serde_json::from_str(s).unwrap(); + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.len(), 4); + + assert_eq!(deserialized[0], ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))])); + assert_eq!(deserialized[1], ValidatorSet::SafeContract(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b")))); + assert_eq!(deserialized[2], ValidatorSet::Contract(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b")))); + match deserialized[3] { + ValidatorSet::Multi(ref map) => { + assert_eq!(map.len(), 3); + assert!(map.contains_key(&Uint(U256::from(0)))); + assert!(map.contains_key(&Uint(U256::from(10)))); + assert!(map.contains_key(&Uint(U256::from(20)))); + }, + _ => assert!(false), + } } } From c0a8eaa3bf936aace24e9fb8b7e516facbfab94d Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Fri, 12 May 2017 23:51:02 +0800 Subject: [PATCH 04/29] add asserts in spec::authority_round's unit test --- json/src/spec/authority_round.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/json/src/spec/authority_round.rs b/json/src/spec/authority_round.rs index 83e029c56..ae518eebc 100644 --- a/json/src/spec/authority_round.rs +++ b/json/src/spec/authority_round.rs @@ -62,7 +62,10 @@ pub struct AuthorityRound { mod tests { use uint::Uint; use util::U256; + use util::H160; use serde_json; + use hash::Address; + use spec::validator_set::ValidatorSet; use spec::authority_round::AuthorityRound; #[test] @@ -84,5 +87,10 @@ mod tests { let deserialized: AuthorityRound = serde_json::from_str(s).unwrap(); assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400))); assert_eq!(deserialized.params.step_duration, Uint(U256::from(0x02))); + assert_eq!(deserialized.params.validators, ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))])); + assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x50)))); + assert!(deserialized.params.registrar.is_none()); + assert_eq!(deserialized.params.start_step, Some(Uint(U256::from(24)))); + assert_eq!(deserialized.params.eip155_transition, Some(Uint(U256::from(0x42)))); } } From 90b8b612cca168dcfdd100d1772fc81295d97b8e Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 14 May 2017 00:21:23 +0800 Subject: [PATCH 05/29] add asserts for spec::basic_authority --- 
json/src/spec/basic_authority.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/json/src/spec/basic_authority.rs b/json/src/spec/basic_authority.rs index 06f50268e..01fe8f088 100644 --- a/json/src/spec/basic_authority.rs +++ b/json/src/spec/basic_authority.rs @@ -42,7 +42,12 @@ pub struct BasicAuthority { #[cfg(test)] mod tests { use serde_json; + use uint::Uint; + use util::U256; + use hash::Address; + use util::hash::H160; use spec::basic_authority::BasicAuthority; + use spec::validator_set::ValidatorSet; #[test] fn basic_authority_deserialization() { @@ -56,6 +61,11 @@ mod tests { } }"#; - let _deserialized: BasicAuthority = serde_json::from_str(s).unwrap(); + let deserialized: BasicAuthority = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400))); + assert_eq!(deserialized.params.duration_limit, Uint(U256::from(0x0d))); + let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]); + assert_eq!(deserialized.params.validators, vs); } } From 0b4eef48c5c4dd2ea4b0b7adcb1047bf102bcf78 Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 14 May 2017 00:31:57 +0800 Subject: [PATCH 06/29] add asserts in instantseal --- json/src/spec/instant_seal.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/json/src/spec/instant_seal.rs b/json/src/spec/instant_seal.rs index 168b51c68..ebf167d28 100644 --- a/json/src/spec/instant_seal.rs +++ b/json/src/spec/instant_seal.rs @@ -35,6 +35,8 @@ pub struct InstantSeal { #[cfg(test)] mod tests { use serde_json; + use hash::Address; + use util::hash::H160; use spec::instant_seal::InstantSeal; #[test] @@ -45,6 +47,7 @@ mod tests { } }"#; - let _deserialized: InstantSeal = serde_json::from_str(s).unwrap(); + let deserialized: InstantSeal = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.registrar, Some(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b")))); } } From a61b249d57f4c87abd8dcdc9c179f146bc5c7a7c Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 14 May 2017 10:53:53 +0800 Subject: [PATCH 07/29] add asserts in tendermint.rs --- json/src/spec/tendermint.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/json/src/spec/tendermint.rs b/json/src/spec/tendermint.rs index 16c2d7255..5a15a0841 100644 --- a/json/src/spec/tendermint.rs +++ b/json/src/spec/tendermint.rs @@ -57,7 +57,12 @@ pub struct Tendermint { #[cfg(test)] mod tests { use serde_json; + use uint::Uint; + use util::U256; + use hash::Address; + use util::hash::H160; use spec::tendermint::Tendermint; + use spec::validator_set::ValidatorSet; #[test] fn tendermint_deserialization() { @@ -71,6 +76,10 @@ mod tests { } }"#; - let _deserialized: Tendermint = serde_json::from_str(s).unwrap(); + let deserialized: Tendermint = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.gas_limit_bound_divisor, Uint(U256::from(0x0400))); + let vs = ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]); + assert_eq!(deserialized.params.validators, vs); + assert_eq!(deserialized.params.block_reward, Some(Uint(U256::from(0x50)))); } } From 5c7c30cc4bb6d5f1528ab68f9b6983afdf88c715 Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 14 May 2017 22:40:15 +0800 Subject: [PATCH 08/29] add asserts in spec param.rs --- json/src/spec/params.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/json/src/spec/params.rs 
b/json/src/spec/params.rs index 31b5cf68a..d546ad86b 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -64,21 +64,27 @@ pub struct Params { #[cfg(test)] mod tests { use serde_json; + use uint::Uint; + use util::U256; use spec::params::Params; #[test] fn params_deserialization() { let s = r#"{ - "homesteadTransition": "0x118c30", "maximumExtraDataSize": "0x20", "networkID" : "0x1", "chainID" : "0x15", "subprotocolName" : "exp", "minGasLimit": "0x1388", - "accountStartNonce": "0x00" + "accountStartNonce": "0x01" }"#; - let _deserialized: Params = serde_json::from_str(s).unwrap(); - // TODO: validate all fields + let deserialized: Params = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.maximum_extra_data_size, Uint(U256::from(0x20))); + assert_eq!(deserialized.network_id, Uint(U256::from(0x1))); + assert_eq!(deserialized.chain_id, Some(Uint(U256::from(0x15)))); + assert_eq!(deserialized.subprotocol_name, Some("exp".to_owned())); + assert_eq!(deserialized.min_gas_limit, Uint(U256::from(0x1388))); + assert_eq!(deserialized.account_start_nonce, Some(Uint(U256::from(0x01)))); } } From f96731c82bc3bdb4f5ef1ea37b9dab49ccfac51b Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 14 May 2017 23:20:34 +0800 Subject: [PATCH 09/29] add assets for seal.rs --- json/src/spec/seal.rs | 46 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/json/src/spec/seal.rs b/json/src/spec/seal.rs index 247bceb22..7345a623d 100644 --- a/json/src/spec/seal.rs +++ b/json/src/spec/seal.rs @@ -70,32 +70,62 @@ pub enum Seal { #[cfg(test)] mod tests { use serde_json; - use spec::Seal; + use hash::*; + use bytes::Bytes; + use uint::Uint; + use util::U256; + use util::{H64 as Eth64, H256 as Eth256, H520 as Eth520}; + use spec::{Ethereum, AuthorityRoundSeal, TendermintSeal, Seal}; #[test] fn seal_deserialization() { let s = r#"[{ "ethereum": { "nonce": "0x0000000000000042", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + "mixHash": "0x1000000000000000000000000000000000000000000000000000000000000001" } },{ "generic": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" },{ "authorityRound": { "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "signature": "0x2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002" } },{ "tendermint": { - "round": "0x0", - "proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "round": "0x3", + "proposal": "0x3000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003", "precommits": [ - "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "0x4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004" ] } }]"#; - let _deserialized: Vec = serde_json::from_str(s).unwrap(); - // TODO: validate all fields + + let deserialized: Vec = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.len(), 4); + + // [0] + assert_eq!(deserialized[0], Seal::Ethereum(Ethereum { + nonce: H64(Eth64::from("0x0000000000000042")), + mix_hash: 
H256(Eth256::from("0x1000000000000000000000000000000000000000000000000000000000000001")) + })); + + // [1] + assert_eq!(deserialized[1], Seal::Generic(Bytes::new(vec![ + 0xe0, 0x11, 0xbb, 0xe8, 0xdb, 0x4e, 0x34, 0x7b, 0x4e, 0x8c, 0x93, 0x7c, 0x1c, 0x83, 0x70, 0xe4, + 0xb5, 0xed, 0x33, 0xad, 0xb3, 0xdb, 0x69, 0xcb, 0xdb, 0x7a, 0x38, 0xe1, 0xe5, 0x0b, 0x1b, 0x82, 0xfa]))); + + // [2] + assert_eq!(deserialized[2], Seal::AuthorityRound(AuthorityRoundSeal { + step: Uint(U256::from(0x0)), + signature: H520(Eth520::from("0x2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002")) + })); + + // [3] + assert_eq!(deserialized[3], Seal::Tendermint(TendermintSeal { + round: Uint(U256::from(0x3)), + proposal: H520(Eth520::from("0x3000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003")), + precommits: vec![H520(Eth520::from("0x4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004"))] + })); } } From 292eb1de626688f035215868830241f1a2a68cdb Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Fri, 19 May 2017 23:56:33 +0800 Subject: [PATCH 10/29] add asserts in engine.rs --- json/src/spec/engine.rs | 67 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/json/src/spec/engine.rs b/json/src/spec/engine.rs index ef54c4ab1..5706b0680 100644 --- a/json/src/spec/engine.rs +++ b/json/src/spec/engine.rs @@ -58,7 +58,11 @@ mod tests { "instantSeal": { "params": {} } }"#; - let _deserialized: Engine = serde_json::from_str(s).unwrap(); + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::InstantSeal(_) => {}, // instant seal is unit tested in its own file. + _ => assert!(false), + }; let s = r#"{ "Ethash": { @@ -77,7 +81,66 @@ mod tests { } }"#; - let _deserialized: Engine = serde_json::from_str(s).unwrap(); + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::Ethash(_) => {}, // ethash is unit tested in its own file. + _ => assert!(false), + }; + + let s = r#"{ + "basicAuthority": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "durationLimit": "0x0d", + "validators" : { + "list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] + } + } + } + }"#; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::BasicAuthority(_) => {}, // basicAuthority is unit tested in its own file. + _ => assert!(false), + }; + + let s = r#"{ + "authorityRound": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "stepDuration": "0x02", + "validators": { + "list" : ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] + }, + "blockReward": "0x50", + "startStep" : 24, + "eip155Transition": "0x42", + "validateStepTransition": 150 + } + } + }"#; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::AuthorityRound(_) => {}, // AuthorityRound is unit tested in its own file. + _ => assert!(false), + }; + + let s = r#"{ + "tendermint": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "validators": { + "list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] + }, + "blockReward": "0x50" + } + } + }"#; + let deserialized: Engine = serde_json::from_str(s).unwrap(); + match deserialized { + Engine::Tendermint(_) => {}, // Tendermint is unit tested in its own file. 
+ _ => assert!(false), + }; } } From 73ad575306ae3afda1f886ef6cf6de89a03f136a Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sat, 20 May 2017 00:17:52 +0800 Subject: [PATCH 11/29] add asserts in genesis.rs --- json/src/spec/genesis.rs | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/json/src/spec/genesis.rs b/json/src/spec/genesis.rs index 6f69f8c14..120df4fc1 100644 --- a/json/src/spec/genesis.rs +++ b/json/src/spec/genesis.rs @@ -58,7 +58,15 @@ pub struct Genesis { #[cfg(test)] mod tests { use serde_json; + use bytes::Bytes; + use uint::Uint; + use util::U256; + use hash::{H64, H256, Address}; + use util::hash::H160; + use util::{H64 as Eth64, H256 as Eth256}; use spec::genesis::Genesis; + use spec::{Ethereum, Seal}; + use std::str::FromStr; #[test] fn genesis_deserialization() { @@ -71,14 +79,29 @@ mod tests { "nonce": "0x00006d6f7264656e" } }, - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "author": "0x1000000000000000000000000000000000000001", + "timestamp": "0x07", + "parentHash": "0x9000000000000000000000000000000000000000000000000000000000000000", "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", "gasLimit": "0x1388", "stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" }"#; - let _deserialized: Genesis = serde_json::from_str(s).unwrap(); - // TODO: validate all fields + let deserialized: Genesis = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized, Genesis { + seal: Seal::Ethereum(Ethereum { + nonce: H64(Eth64::from("0x00006d6f7264656e")), + mix_hash: H256(Eth256::from("0x0000000000000000000000000000000000000000000000000000000000000000")) + }), + difficulty: Uint(U256::from(0x400000000u64)), + author: Some(Address(H160::from("0x1000000000000000000000000000000000000001"))), + timestamp: Some(Uint(U256::from(0x07))), + parent_hash: Some(H256(Eth256::from("0x9000000000000000000000000000000000000000000000000000000000000000"))), + gas_limit: Uint(U256::from(0x1388)), + transactions_root: None, + receipts_root: None, + state_root: Some(H256(Eth256::from("0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"))), + gas_used: None, + extra_data: Some(Bytes::from_str("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa").unwrap()), + }); } } From d35da909dbf601b43ac493901f4e23f1caa21f75 Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sat, 20 May 2017 12:11:59 +0800 Subject: [PATCH 12/29] add asserts in ethash.rs --- json/src/spec/ethash.rs | 108 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 99 insertions(+), 9 deletions(-) diff --git a/json/src/spec/ethash.rs b/json/src/spec/ethash.rs index 16d05a18f..926dcacee 100644 --- a/json/src/spec/ethash.rs +++ b/json/src/spec/ethash.rs @@ -131,7 +131,11 @@ pub struct Ethash { #[cfg(test)] mod tests { use serde_json; - use spec::ethash::Ethash; + use uint::Uint; + use util::U256; + use hash::Address; + use util::hash::H160; + use spec::ethash::{Ethash, EthashParams}; #[test] fn ethash_deserialization() { @@ -170,17 +174,71 @@ mod tests { ], "difficultyHardforkTransition": "0x59d9", "difficultyHardforkBoundDivisor": "0x0200", - "bombDefuseTransition": "0x42", + "bombDefuseTransition": "0x41", "eip100bTransition": "0x42", - "eip150Transition": "0x42", - "eip155Transition": "0x42", - "eip160Transition": "0x42", - "eip161abcTransition": "0x42", - "eip161dTransition": "0x42" 
+ "eip150Transition": "0x43", + "eip155Transition": "0x44", + "eip160Transition": "0x45", + "eip161abcTransition": "0x46", + "eip161dTransition": "0x47" } }"#; - let _deserialized: Ethash = serde_json::from_str(s).unwrap(); + let deserialized: Ethash = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, Ethash{ + params: EthashParams{ + gas_limit_bound_divisor: Uint(U256::from(0x0400)), + minimum_difficulty: Uint(U256::from(0x020000)), + difficulty_bound_divisor: Uint(U256::from(0x0800)), + difficulty_increment_divisor: None, + metropolis_difficulty_increment_divisor: None, + duration_limit: Uint(U256::from(0x0d)), + block_reward: Uint(U256::from(0x4563918244F40000u64)), + registrar: Some(Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))), + homestead_transition: Some(Uint(U256::from(0x42))), + dao_hardfork_transition: Some(Uint(U256::from(0x08))), + dao_hardfork_beneficiary: Some(Address(H160::from("0xabcabcabcabcabcabcabcabcabcabcabcabcabca"))), + dao_hardfork_accounts: Some(vec![ + Address(H160::from("0x304a554a310c7e546dfe434669c62820b7d83490")), + Address(H160::from("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79")), + Address(H160::from("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f")), + Address(H160::from("0x17802f43a0137c506ba92291391a8a8f207f487d")), + Address(H160::from("0xb136707642a4ea12fb4bae820f03d2562ebff487")), + Address(H160::from("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940")), + Address(H160::from("0xf14c14075d6c4ed84b86798af0956deef67365b5")), + Address(H160::from("0xca544e5c4687d109611d0f8f928b53a25af72448")), + Address(H160::from("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c")), + Address(H160::from("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7")), + Address(H160::from("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6")), + Address(H160::from("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2")), + Address(H160::from("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a")), + Address(H160::from("0xd343b217de44030afaa275f54d31a9317c7f441e")), + Address(H160::from("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106")), + Address(H160::from("0xda2fef9e4a3230988ff17df2165440f37e8b1708")), + Address(H160::from("0xf4c64518ea10f995918a454158c6b61407ea345c")), + Address(H160::from("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97")), + Address(H160::from("0xbb9bc244d798123fde783fcc1c72d3bb8c189413")), + Address(H160::from("0x807640a13483f8ac783c557fcdf27be11ea4ac7a")), + ]), + difficulty_hardfork_transition: Some(Uint(U256::from(0x59d9))), + difficulty_hardfork_bound_divisor: Some(Uint(U256::from(0x0200))), + bomb_defuse_transition: Some(Uint(U256::from(0x41))), + eip100b_transition: Some(Uint(U256::from(0x42))), + eip150_transition: Some(Uint(U256::from(0x43))), + eip155_transition: Some(Uint(U256::from(0x44))), + eip160_transition: Some(Uint(U256::from(0x45))), + eip161abc_transition: Some(Uint(U256::from(0x46))), + eip161d_transition: Some(Uint(U256::from(0x47))), + ecip1010_pause_transition: None, + ecip1010_continue_transition: None, + max_code_size: None, + max_gas_limit_transition: None, + max_gas_limit: None, + min_gas_price_transition: None, + min_gas_price: None, + } + }); } #[test] @@ -195,6 +253,38 @@ mod tests { } }"#; - let _deserialized: Ethash = serde_json::from_str(s).unwrap(); + let deserialized: Ethash = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized, Ethash{ + params: EthashParams{ + gas_limit_bound_divisor: Uint(U256::from(0x0400)), + minimum_difficulty: Uint(U256::from(0x020000)), + difficulty_bound_divisor: Uint(U256::from(0x0800)), + 
difficulty_increment_divisor: None, + metropolis_difficulty_increment_divisor: None, + duration_limit: Uint(U256::from(0x0d)), + block_reward: Uint(U256::from(0x4563918244F40000u64)), + registrar: None, + homestead_transition: None, + dao_hardfork_transition: None, + dao_hardfork_beneficiary: None, + dao_hardfork_accounts: None, + difficulty_hardfork_transition: None, + difficulty_hardfork_bound_divisor: None, + bomb_defuse_transition: None, + eip100b_transition: None, + eip150_transition: None, + eip155_transition: None, + eip160_transition: None, + eip161abc_transition: None, + eip161d_transition: None, + ecip1010_pause_transition: None, + ecip1010_continue_transition: None, + max_code_size: None, + max_gas_limit_transition: None, + max_gas_limit: None, + min_gas_price_transition: None, + min_gas_price: None, + } + }); } } From 14b715bdc7e746801ed4e83af8654445d2d7366b Mon Sep 17 00:00:00 2001 From: Guanqun Lu Date: Sun, 21 May 2017 10:42:41 +0800 Subject: [PATCH 13/29] fix not build issue, only detect pure js folder --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 353941ae0..cb8b18201 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -549,7 +549,7 @@ test-darwin: - triggers before_script: - git submodule update --init --recursive - - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) + - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e "^js/" -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) script: - export RUST_BACKTRACE=1 - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi From f47cbe0be69d68f0bb4c0f9c874fc21e66dbd73d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 May 2017 12:24:32 +0200 Subject: [PATCH 14/29] Adding CLI options: port shift and unsafe expose. (#5677) * Adding CLI option for port shift and unsafe expose. * Fixing IPC path. * Fix hosts when attempting to expose on all interfaces. * Fixing test. * Fix typo. --- .../src/snapshot/tests/proof_of_authority.rs | 2 +- parity/cli/config.toml | 2 + parity/cli/mod.rs | 20 ++- parity/cli/usage.txt | 14 +- parity/configuration.rs | 157 +++++++++++++----- parity/helpers.rs | 11 +- parity/rpc.rs | 9 +- rpc/src/v1/helpers/network_settings.rs | 2 +- 8 files changed, 159 insertions(+), 58 deletions(-) diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs index d82b6f3ae..5958a5f64 100644 --- a/ethcore/src/snapshot/tests/proof_of_authority.rs +++ b/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -40,7 +40,7 @@ const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes acti const TRANSITION_BLOCK_2: usize = 6; // block at which the second contract activates. macro_rules! secret { - ($e: expr) => { Secret::from_slice(&$e.sha3()).expect(format!("sha3({}) not valid secret.", $e).as_str()) } + ($e: expr) => { Secret::from_slice(&$e.sha3()) } } lazy_static! 
{ diff --git a/parity/cli/config.toml b/parity/cli/config.toml index b6695f3f5..c3617077e 100644 --- a/parity/cli/config.toml +++ b/parity/cli/config.toml @@ -81,3 +81,5 @@ jit = false logging = "own_tx=trace" log_file = "/var/log/parity.log" color = true +ports_shift = 0 +unsafe_expose = false diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index f36622357..efd618ffb 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -98,6 +98,13 @@ usage! { flag_no_persistent_txqueue: bool = false, or |c: &Config| otry!(c.parity).no_persistent_txqueue, + // -- Convenience Options + flag_config: String = "$BASE/config.toml", or |_| None, + flag_ports_shift: u16 = 0u16, + or |c: &Config| otry!(c.misc).ports_shift, + flag_unsafe_expose: bool = false, + or |c: &Config| otry!(c.misc).unsafe_expose, + // -- Account Options flag_unlock: Option = None, or |c: &Config| otry!(c.account).unlock.as_ref().map(|vec| Some(vec.join(","))), @@ -189,7 +196,7 @@ usage! { // IPC flag_no_ipc: bool = false, or |c: &Config| otry!(c.ipc).disable.clone(), - flag_ipc_path: String = "$BASE/jsonrpc.ipc", + flag_ipc_path: String = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| otry!(c.ipc).path.clone(), flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc,secretstore", or |c: &Config| otry!(c.ipc).apis.as_ref().map(|vec| vec.join(",")), @@ -339,7 +346,6 @@ usage! { or |c: &Config| otry!(c.vm).jit.clone(), // -- Miscellaneous Options - flag_config: String = "$BASE/config.toml", or |_| None, flag_logging: Option = None, or |c: &Config| otry!(c.misc).logging.clone().map(Some), flag_log_file: Option = None, @@ -575,6 +581,8 @@ struct Misc { logging: Option, log_file: Option, color: Option, + ports_shift: Option, + unsafe_expose: Option, } #[cfg(test)] @@ -686,6 +694,11 @@ mod tests { flag_light: false, flag_no_persistent_txqueue: false, + // -- Convenience Options + flag_config: "$BASE/config.toml".into(), + flag_ports_shift: 0, + flag_unsafe_expose: false, + // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), flag_password: vec!["~/.safe/password.file".into()], @@ -862,7 +875,6 @@ mod tests { // -- Miscellaneous Options flag_version: false, - flag_config: "$BASE/config.toml".into(), flag_logging: Some("own_tx=trace".into()), flag_log_file: Some("/var/log/parity.log".into()), flag_no_color: false, @@ -1037,6 +1049,8 @@ mod tests { logging: Some("own_tx=trace".into()), log_file: Some("/var/log/parity.log".into()), color: Some(true), + ports_shift: Some(0), + unsafe_expose: Some(false), }), stratum: None, }); diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 2eee70c7c..bd5ed1f84 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -76,6 +76,18 @@ Operating Options: potentially higher in bandwidth. Has no effect with subcommands (default: {flag_light}). +Convenience Options: + -c --config CONFIG Specify a filename containing a configuration file. + (default: {flag_config}) + --ports-shift SHIFT Add SHIFT to all port numbers Parity is listening on. + Includes network port and all servers (RPC, WebSockets, UI, IPFS, SecretStore). + (default: {flag_ports_shift}) + --unsafe-expose All servers will listen on external interfaces and will + be remotely accessible. It's equivalent with setting + the following: --{{ws,jsonrpc,ui,ipfs,secret_store,stratum}}-interface=all --*-hosts=all + This option is UNSAFE and should be used with great care! 
+ (default: {flag_unsafe_expose}) + Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. ACCOUNTS is a comma-delimited list of addresses. @@ -441,8 +453,6 @@ Internal Options: --can-restart Executable will auto-restart if exiting with 69. Miscellaneous Options: - -c --config CONFIG Specify a filename containing a configuration file. - (default: {flag_config}) -l --logging LOGGING Specify the logging level. Must conform to the same format as RUST_LOG. (default: {flag_logging:?}) --log-file FILENAME Specify a filename into which logging should be diff --git a/parity/configuration.rs b/parity/configuration.rs index bfed67aa1..7985c9cd3 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -373,7 +373,7 @@ impl Configuration { public_node: public_node, geth_compatibility: geth_compatibility, ui_address: ui_address, - net_settings: self.network_settings(), + net_settings: self.network_settings()?, dapps_conf: dapps_conf, ipfs_conf: ipfs_conf, signer_conf: signer_conf, @@ -513,7 +513,7 @@ impl Configuration { Ok(Some(StratumOptions { io_path: self.directories().db, listen_addr: self.stratum_interface(), - port: self.args.flag_stratum_port, + port: self.args.flag_ports_shift + self.args.flag_stratum_port, secret: self.args.flag_stratum_secret.as_ref().map(|s| s.parse::().unwrap_or_else(|_| s.sha3())), })) } else { Ok(None) } @@ -556,10 +556,10 @@ impl Configuration { fn signer_config(&self) -> SignerConfiguration { SignerConfiguration { enabled: self.ui_enabled(), - port: self.args.flag_ui_port, + port: self.args.flag_ports_shift + self.args.flag_ui_port, interface: self.ui_interface(), signer_path: self.directories().signer, - skip_origin_validation: self.args.flag_ui_no_validation, + skip_origin_validation: self.args.flag_unsafe_expose || self.args.flag_ui_no_validation, } } @@ -581,9 +581,9 @@ impl Configuration { self_secret: self.secretstore_self_secret()?, nodes: self.secretstore_nodes()?, interface: self.secretstore_interface(), - port: self.args.flag_secretstore_port, + port: self.args.flag_ports_shift + self.args.flag_secretstore_port, http_interface: self.secretstore_http_interface(), - http_port: self.args.flag_secretstore_http_port, + http_port: self.args.flag_ports_shift + self.args.flag_secretstore_http_port, data_path: self.directories().secretstore, }) } @@ -591,7 +591,7 @@ impl Configuration { fn ipfs_config(&self) -> IpfsConfiguration { IpfsConfiguration { enabled: self.args.flag_ipfs_api, - port: self.args.flag_ipfs_api_port, + port: self.args.flag_ports_shift + self.args.flag_ipfs_api_port, interface: self.ipfs_interface(), cors: self.ipfs_cors(), hosts: self.ipfs_hosts(), @@ -674,9 +674,9 @@ impl Configuration { } } - fn net_addresses(&self) -> Result<(Option, Option), String> { - let port = self.args.flag_port; - let listen_address = Some(SocketAddr::new("0.0.0.0".parse().unwrap(), port)); + fn net_addresses(&self) -> Result<(SocketAddr, Option), String> { + let port = self.args.flag_ports_shift + self.args.flag_port; + let listen_address = SocketAddr::new("0.0.0.0".parse().unwrap(), port); let public_address = if self.args.flag_nat.starts_with("extip:") { let host = &self.args.flag_nat[6..]; let host = host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))?; @@ -692,7 +692,7 @@ impl Configuration { ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; ret.boot_nodes = to_bootnodes(&self.args.flag_bootnodes)?; let (listen, public) = self.net_addresses()?; - 
ret.listen_address = listen.map(|l| format!("{}", l)); + ret.listen_address = Some(format!("{}", listen)); ret.public_address = public.map(|p| format!("{}", p)); ret.use_secret = match self.args.flag_node_key.as_ref() .map(|s| s.parse::().or_else(|_| Secret::from_unsafe_slice(&s.sha3())).map_err(|e| format!("Invalid key: {:?}", e)) @@ -746,7 +746,19 @@ impl Configuration { Self::cors(self.args.flag_ipfs_api_cors.as_ref()) } - fn hosts(hosts: &str) -> Option> { + fn hosts(&self, hosts: &str, interface: &str) -> Option> { + if self.args.flag_unsafe_expose { + return None; + } + + if interface == "0.0.0.0" && hosts == "none" { + return None; + } + + Self::parse_hosts(hosts) + } + + fn parse_hosts(hosts: &str) -> Option> { match hosts { "none" => return Some(Vec::new()), "*" | "all" | "any" => return None, @@ -757,19 +769,19 @@ impl Configuration { } fn rpc_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_jsonrpc_hosts) + self.hosts(&self.args.flag_jsonrpc_hosts, &self.rpc_interface()) } fn ws_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_ws_hosts) + self.hosts(&self.args.flag_ws_hosts, &self.ws_interface()) } fn ws_origins(&self) -> Option> { - Self::hosts(&self.args.flag_ws_origins) + Self::parse_hosts(&self.args.flag_ws_origins) } fn ipfs_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_ipfs_api_hosts) + self.hosts(&self.args.flag_ipfs_api_hosts, &self.ipfs_interface()) } fn ipc_config(&self) -> Result { @@ -795,7 +807,7 @@ impl Configuration { let conf = HttpConfiguration { enabled: self.rpc_enabled(), interface: self.rpc_interface(), - port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), + port: self.args.flag_ports_shift + self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), apis: match self.args.flag_public_node { false => self.rpc_apis().parse()?, true => self.rpc_apis().parse::()?.retain(ApiSet::PublicContext), @@ -816,7 +828,7 @@ impl Configuration { let conf = WsConfiguration { enabled: self.ws_enabled(), interface: self.ws_interface(), - port: self.args.flag_ws_port, + port: self.args.flag_ports_shift + self.args.flag_ws_port, apis: self.args.flag_ws_apis.parse()?, hosts: self.ws_hosts(), origins: self.ws_origins() @@ -825,15 +837,17 @@ impl Configuration { Ok(conf) } - fn network_settings(&self) -> NetworkSettings { - NetworkSettings { + fn network_settings(&self) -> Result { + let http_conf = self.http_config()?; + let net_addresses = self.net_addresses()?; + Ok(NetworkSettings { name: self.args.flag_identity.clone(), chain: self.chain(), - network_port: self.args.flag_port, - rpc_enabled: self.rpc_enabled(), - rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), - rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), - } + network_port: net_addresses.0.port(), + rpc_enabled: http_conf.enabled, + rpc_interface: http_conf.interface, + rpc_port: http_conf.port, + }) } fn update_policy(&self) -> Result { @@ -906,7 +920,11 @@ impl Configuration { if self.args.flag_geth { geth_ipc_path(self.args.flag_testnet) } else { - parity_ipc_path(&self.directories().base, &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) + parity_ipc_path( + &self.directories().base, + &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()), + self.args.flag_ports_shift, + ) } } @@ -919,13 +937,14 @@ impl Configuration { } fn ui_interface(&self) -> String { - match self.args.flag_ui_interface.as_str() { - "local" => "127.0.0.1", - x => x, - }.into() + 
self.interface(&self.args.flag_ui_interface) } - fn interface(interface: &str) -> String { + fn interface(&self, interface: &str) -> String { + if self.args.flag_unsafe_expose { + return "0.0.0.0".into(); + } + match interface { "all" => "0.0.0.0", "local" => "127.0.0.1", @@ -934,23 +953,24 @@ impl Configuration { } fn rpc_interface(&self) -> String { - Self::interface(&self.network_settings().rpc_interface) + let rpc_interface = self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()); + self.interface(&rpc_interface) } fn ws_interface(&self) -> String { - Self::interface(&self.args.flag_ws_interface) + self.interface(&self.args.flag_ws_interface) } fn ipfs_interface(&self) -> String { - Self::interface(&self.args.flag_ipfs_api_interface) + self.interface(&self.args.flag_ipfs_api_interface) } fn secretstore_interface(&self) -> String { - Self::interface(&self.args.flag_secretstore_interface) + self.interface(&self.args.flag_secretstore_interface) } fn secretstore_http_interface(&self) -> String { - Self::interface(&self.args.flag_secretstore_http_interface) + self.interface(&self.args.flag_secretstore_http_interface) } fn secretstore_self_secret(&self) -> Result, String> { @@ -986,7 +1006,7 @@ impl Configuration { } fn stratum_interface(&self) -> String { - Self::interface(&self.args.flag_stratum_interface) + self.interface(&self.args.flag_stratum_interface) } fn rpc_enabled(&self) -> bool { @@ -1322,23 +1342,23 @@ mod tests { let conf = parse(&["parity", "--testnet", "--identity", "testname"]); // then - assert_eq!(conf.network_settings(), NetworkSettings { + assert_eq!(conf.network_settings(), Ok(NetworkSettings { name: "testname".to_owned(), chain: "testnet".to_owned(), network_port: 30303, rpc_enabled: true, - rpc_interface: "local".to_owned(), + rpc_interface: "127.0.0.1".to_owned(), rpc_port: 8545, - }); + })); } #[test] fn should_parse_rpc_settings_with_geth_compatiblity() { // given fn assert(conf: Configuration) { - let net = conf.network_settings(); + let net = conf.network_settings().unwrap(); assert_eq!(net.rpc_enabled, true); - assert_eq!(net.rpc_interface, "all".to_owned()); + assert_eq!(net.rpc_interface, "0.0.0.0".to_owned()); assert_eq!(net.rpc_port, 8000); assert_eq!(conf.rpc_cors(), Some(vec!["*".to_owned()])); assert_eq!(conf.rpc_apis(), "web3,eth".to_owned()); @@ -1516,4 +1536,57 @@ mod tests { _ => panic!("Should be Cmd::Run"), } } + + #[test] + fn should_apply_ports_shift() { + // given + + // when + let conf0 = parse(&["parity", "--ports-shift", "1", "--stratum"]); + let conf1 = parse(&["parity", "--ports-shift", "1", "--jsonrpc-port", "8544"]); + + // then + assert_eq!(conf0.net_addresses().unwrap().0.port(), 30304); + assert_eq!(conf0.network_settings().unwrap().network_port, 30304); + assert_eq!(conf0.network_settings().unwrap().rpc_port, 8546); + assert_eq!(conf0.http_config().unwrap().port, 8546); + assert_eq!(conf0.ws_config().unwrap().port, 8547); + assert_eq!(conf0.signer_config().port, 8181); + assert_eq!(conf0.secretstore_config().unwrap().port, 8084); + assert_eq!(conf0.secretstore_config().unwrap().http_port, 8083); + assert_eq!(conf0.ipfs_config().port, 5002); + assert_eq!(conf0.stratum_options().unwrap().unwrap().port, 8009); + + + assert_eq!(conf1.net_addresses().unwrap().0.port(), 30304); + assert_eq!(conf1.network_settings().unwrap().network_port, 30304); + assert_eq!(conf1.network_settings().unwrap().rpc_port, 8545); + assert_eq!(conf1.http_config().unwrap().port, 8545); + assert_eq!(conf1.ws_config().unwrap().port, 
8547); + assert_eq!(conf1.signer_config().port, 8181); + assert_eq!(conf1.secretstore_config().unwrap().port, 8084); + assert_eq!(conf1.secretstore_config().unwrap().http_port, 8083); + assert_eq!(conf1.ipfs_config().port, 5002); + } + + #[test] + fn should_expose_all_servers() { + // given + + // when + let conf0 = parse(&["parity", "--unsafe-expose"]); + + // then + assert_eq!(&conf0.network_settings().unwrap().rpc_interface, "0.0.0.0"); + assert_eq!(&conf0.http_config().unwrap().interface, "0.0.0.0"); + assert_eq!(conf0.http_config().unwrap().hosts, None); + assert_eq!(&conf0.ws_config().unwrap().interface, "0.0.0.0"); + assert_eq!(conf0.ws_config().unwrap().hosts, None); + assert_eq!(&conf0.signer_config().interface, "0.0.0.0"); + assert_eq!(conf0.signer_config().skip_origin_validation, true); + assert_eq!(&conf0.secretstore_config().unwrap().interface, "0.0.0.0"); + assert_eq!(&conf0.secretstore_config().unwrap().http_interface, "0.0.0.0"); + assert_eq!(&conf0.ipfs_config().interface, "0.0.0.0"); + assert_eq!(conf0.ipfs_config().hosts, None); + } } diff --git a/parity/helpers.rs b/parity/helpers.rs index e98c31ab7..449d8f569 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -166,13 +166,12 @@ pub fn geth_ipc_path(testnet: bool) -> String { } /// Formats and returns parity ipc path. -pub fn parity_ipc_path(base: &str, s: &str) -> String { - // Windows path should not be hardcoded here. - if cfg!(windows) { - return r"\\.\pipe\parity.jsonrpc".to_owned(); +pub fn parity_ipc_path(base: &str, path: &str, shift: u16) -> String { + let mut path = path.to_owned(); + if shift != 0 { + path = path.replace("jsonrpc.ipc", &format!("jsonrpc-{}.ipc", shift)); } - - replace_home(base, s) + replace_home(base, &path) } /// Validates and formats bootnodes option. 
diff --git a/parity/rpc.rs b/parity/rpc.rs index 2ac93baf7..eb8f5c279 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -18,7 +18,6 @@ use std::io; use std::sync::Arc; use dapps; -use dir::default_data_path; use parity_rpc::informant::{RpcStats, Middleware}; use parity_rpc::{self as rpc, HttpServerError, Metadata, Origin, DomainsValidation}; use helpers::parity_ipc_path; @@ -63,10 +62,14 @@ pub struct IpcConfiguration { impl Default for IpcConfiguration { fn default() -> Self { - let data_dir = default_data_path(); IpcConfiguration { enabled: true, - socket_addr: parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc"), + socket_addr: if cfg!(windows) { + r"\\.\pipe\jsonrpc.ipc".into() + } else { + let data_dir = ::dir::default_data_path(); + parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0) + }, apis: ApiSet::IpcContext, } } diff --git a/rpc/src/v1/helpers/network_settings.rs b/rpc/src/v1/helpers/network_settings.rs index cda32c658..a79828624 100644 --- a/rpc/src/v1/helpers/network_settings.rs +++ b/rpc/src/v1/helpers/network_settings.rs @@ -39,7 +39,7 @@ impl Default for NetworkSettings { chain: "foundation".into(), network_port: 30303, rpc_enabled: true, - rpc_interface: "local".into(), + rpc_interface: "127.0.0.1".into(), rpc_port: 8545 } } From 92f5aa7e10574282570ad2a2043fc2d6523bf199 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Tue, 23 May 2017 12:25:41 +0200 Subject: [PATCH 15/29] improved dockerfile builds (#5659) * docker builds current branch instead of cloning the repo * moved ADD command right before RUN cd parity in Dockerfiles * update docker/README.md * removed --no-cache from docker/README.md --- .dockerignore | 22 +++++++++ docker/README.md | 3 ++ docker/centos/Dockerfile | 15 +++--- docker/hub/Dockerfile | 83 -------------------------------- docker/hub/README.md | 3 -- docker/ubuntu-aarch64/Dockerfile | 10 ++-- docker/ubuntu-arm/Dockerfile | 11 ++--- docker/ubuntu-dev/Dockerfile | 37 -------------- docker/ubuntu-jit/Dockerfile | 7 ++- docker/ubuntu-stable/Dockerfile | 40 --------------- docker/ubuntu/Dockerfile | 7 ++- 11 files changed, 48 insertions(+), 190 deletions(-) create mode 100644 .dockerignore create mode 100644 docker/README.md delete mode 100644 docker/hub/Dockerfile delete mode 100644 docker/hub/README.md delete mode 100644 docker/ubuntu-dev/Dockerfile delete mode 100644 docker/ubuntu-stable/Dockerfile diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..a8095795a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +# Generated by Cargo +# will have compiled files and executables +target + +*.swp +*.swo +*.swn +*.DS_Store + +# Visual Studio Code stuff +.vscode + +# GitEye stuff +.project + +# idea ide +.idea + +# git stuff +.git + +ethcore/res/ethereum/tests diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..3b79e8dd7 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,3 @@ +Usage + +```docker build -f docker/ubuntu/Dockerfile --tag ethcore/parity:branch_or_tag_name .``` diff --git a/docker/centos/Dockerfile b/docker/centos/Dockerfile index a4b98e9d6..747a227c9 100644 --- a/docker/centos/Dockerfile +++ b/docker/centos/Dockerfile @@ -1,29 +1,32 @@ FROM centos:latest WORKDIR /build + # install tools and dependencies RUN yum -y update&& \ yum install -y git make gcc-c++ gcc file binutils + # install rustup RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh &&\ ls&&\ sh rustup.sh --disable-sudo + # show backtraces ENV RUST_BACKTRACE 1 + # set compiler ENV CXX g++ ENV CC gcc + # show tools 
RUN rustc -vV && \ cargo -V && \ gcc -v &&\ g++ -v + # build parity -RUN git clone https://github.com/paritytech/parity && \ - cd parity&&\ - git checkout beta && \ - git pull && \ - ls -a&&\ - cargo build --release --verbose && \ +ADD . /build/parity +RUN cd parity&&\ + cargo build --release --verbose && \ ls /build/parity/target/release/parity && \ strip /build/parity/target/release/parity diff --git a/docker/hub/Dockerfile b/docker/hub/Dockerfile deleted file mode 100644 index 3120eeba7..000000000 --- a/docker/hub/Dockerfile +++ /dev/null @@ -1,83 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Parity Technologies -WORKDIR /build -#ENV for build TAG -ARG BUILD_TAG -ENV BUILD_TAG ${BUILD_TAG:-master} -RUN echo $BUILD_TAG -# install tools and dependencies -RUN apt-get update && \ - apt-get install -y --force-yes --no-install-recommends \ - # make - build-essential \ - # add-apt-repository - software-properties-common \ - make \ - curl \ - wget \ - git \ - g++ \ - gcc \ - libc6 \ - libc6-dev \ - binutils \ - file \ - openssl \ - libssl-dev \ - libudev-dev \ - pkg-config \ - dpkg-dev \ - # evmjit dependencies - zlib1g-dev \ - libedit-dev \ - libudev-dev &&\ -# cmake and llvm ppa's. then update ppa's - add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \ - add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \ - apt-get update && \ - apt-get install -y --force-yes cmake llvm-3.7-dev && \ -# install evmjit - git clone https://github.com/debris/evmjit && \ - cd evmjit && \ - mkdir build && cd build && \ - cmake .. && make && make install && cd && \ -# install rustup - curl https://sh.rustup.rs -sSf | sh -s -- -y && \ -# rustup directory - PATH=/root/.cargo/bin:$PATH && \ -# show backtraces - RUST_BACKTRACE=1 && \ -# build parity - cd /build&&git clone https://github.com/paritytech/parity && \ - cd parity && \ - git pull&& \ - git checkout $BUILD_TAG && \ - cargo build --verbose --release --features final && \ - #ls /build/parity/target/release/parity && \ - strip /build/parity/target/release/parity && \ - file /build/parity/target/release/parity&&mkdir -p /parity&& cp /build/parity/target/release/parity /parity&&\ -#cleanup Docker image - rm -rf /root/.cargo&&rm -rf /root/.multirust&&rm -rf /root/.rustup&&rm -rf /build&&\ - apt-get purge -y \ - # make - build-essential \ - # add-apt-repository - software-properties-common \ - make \ - curl \ - wget \ - git \ - g++ \ - gcc \ - binutils \ - file \ - pkg-config \ - dpkg-dev \ - # evmjit dependencies - zlib1g-dev \ - libedit-dev \ - cmake llvm-3.7-dev&&\ - rm -rf /var/lib/apt/lists/* -# setup ENTRYPOINT -EXPOSE 8080 8545 8180 -ENTRYPOINT ["/parity/parity"] diff --git a/docker/hub/README.md b/docker/hub/README.md deleted file mode 100644 index 6253f53f0..000000000 --- a/docker/hub/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Usage - -```docker build --build-arg BUILD_TAG=branch_or_tag_name --no-cache=true --tag ethcore/parity:branch_or_tag_name .``` diff --git a/docker/ubuntu-aarch64/Dockerfile b/docker/ubuntu-aarch64/Dockerfile index c0af9dc0e..eee1587f4 100644 --- a/docker/ubuntu-aarch64/Dockerfile +++ b/docker/ubuntu-aarch64/Dockerfile @@ -1,5 +1,6 @@ FROM ubuntu:14.04 WORKDIR /build + # install tools and dependencies RUN apt-get -y update && \ apt-get install -y --force-yes --no-install-recommends \ @@ -24,14 +25,11 @@ RUN rustup target add aarch64-unknown-linux-gnu ENV RUST_BACKTRACE 1 # show tools - RUN rustc -vV && \ - cargo -V +RUN rustc -vV && cargo -V # build parity -RUN git clone 
https://github.com/paritytech/parity && \ - cd parity && \ - git checkout beta && \ - git pull && \ +ADD . /build/parity +RUN cd parity && \ mkdir -p .cargo && \ echo '[target.aarch64-unknown-linux-gnu]\n\ linker = "aarch64-linux-gnu-gcc"\n'\ diff --git a/docker/ubuntu-arm/Dockerfile b/docker/ubuntu-arm/Dockerfile index a371e190a..f971c98f1 100644 --- a/docker/ubuntu-arm/Dockerfile +++ b/docker/ubuntu-arm/Dockerfile @@ -1,5 +1,6 @@ FROM ubuntu:14.04 WORKDIR /build + # install tools and dependencies RUN apt-get -y update && \ apt-get install -y --force-yes --no-install-recommends \ @@ -23,16 +24,12 @@ RUN rustup target add armv7-unknown-linux-gnueabihf # show backtraces ENV RUST_BACKTRACE 1 - # show tools - RUN rustc -vV && \ - cargo -V +RUN rustc -vV && cargo -V # build parity -RUN git clone https://github.com/paritytech/parity && \ - cd parity && \ - git checkout beta && \ - git pull && \ +ADD . /build/parity +RUN cd parity && \ mkdir -p .cargo && \ echo '[target.armv7-unknown-linux-gnueabihf]\n\ linker = "arm-linux-gnueabihf-gcc"\n'\ diff --git a/docker/ubuntu-dev/Dockerfile b/docker/ubuntu-dev/Dockerfile deleted file mode 100644 index 72b4bb08e..000000000 --- a/docker/ubuntu-dev/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM ubuntu:14.04 - -# install tools and dependencies -RUN apt-get update && \ - apt-get install -y \ - # make - build-essential \ - # add-apt-repository - software-properties-common \ - curl \ - g++ \ - wget \ - git \ - # evmjit dependencies - zlib1g-dev \ - libedit-dev - -# cmake, llvm and rocksdb ppas. then update ppas -RUN add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \ - add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \ - apt-get update && \ - apt-get install -y --force-yes cmake llvm-3.7-dev - -# install evmjit -RUN git clone https://github.com/debris/evmjit && \ - cd evmjit && \ - mkdir build && cd build && \ - cmake .. && make && make install && cd - -# install rustup -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y - -# rustup directory -ENV PATH /root/.cargo/bin:$PATH - -# show backtraces -ENV RUST_BACKTRACE 1 diff --git a/docker/ubuntu-jit/Dockerfile b/docker/ubuntu-jit/Dockerfile index 548c87db6..610d07c7d 100644 --- a/docker/ubuntu-jit/Dockerfile +++ b/docker/ubuntu-jit/Dockerfile @@ -1,5 +1,6 @@ FROM ubuntu:14.04 WORKDIR /build + # install tools and dependencies RUN apt-get update && \ apt-get install -y \ @@ -45,10 +46,8 @@ gcc -v &&\ g++ -v # build parity -RUN git clone https://github.com/paritytech/parity && \ - cd parity && \ - git checkout beta && \ - git pull && \ +ADD . 
/build/parity +RUN cd parity && \ cargo build --release --features ethcore/jit --verbose && \ ls /build/parity/target/release/parity && \ strip /build/parity/target/release/parity diff --git a/docker/ubuntu-stable/Dockerfile b/docker/ubuntu-stable/Dockerfile deleted file mode 100644 index c3fdd362a..000000000 --- a/docker/ubuntu-stable/Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -FROM ubuntu:14.04 -WORKDIR /build -# install tools and dependencies -RUN apt-get update && \ - apt-get install -y \ - build-essential \ - g++ \ - curl \ - git \ - file \ - binutils - -# install rustup -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y - -# rustup directory -ENV PATH /root/.cargo/bin:$PATH - -# show backtraces -ENV RUST_BACKTRACE 1 - -# show tools -RUN rustc -vV && \ -cargo -V && \ -gcc -v &&\ -g++ -v - -# build parity -RUN git clone https://github.com/paritytech/parity && \ - cd parity && \ - git checkout stable && \ - git pull && \ - cargo build --release --verbose && \ - ls /build/parity/target/release/parity && \ - strip /build/parity/target/release/parity - -RUN file /build/parity/target/release/parity - -EXPOSE 8080 8545 8180 -ENTRYPOINT ["/build/parity/target/release/parity"] diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile index 0ee84e1c5..e840cdc6e 100644 --- a/docker/ubuntu/Dockerfile +++ b/docker/ubuntu/Dockerfile @@ -1,5 +1,6 @@ FROM ubuntu:14.04 WORKDIR /build + # install tools and dependencies RUN apt-get update && \ apt-get install -y \ @@ -29,10 +30,8 @@ gcc -v &&\ g++ -v # build parity -RUN git clone https://github.com/paritytech/parity && \ - cd parity && \ - git checkout beta && \ - git pull && \ +ADD . /build/parity +RUN cd parity && \ cargo build --release --verbose && \ ls /build/parity/target/release/parity && \ strip /build/parity/target/release/parity From f38cc8e1826c69151cdaa2c2b8e932daf4d99f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 May 2017 12:26:39 +0200 Subject: [PATCH 16/29] Latest headers Pub-Sub (#5655) * Signer subscription. * Fixing RPC tests. * Block Headers eth-pubsub. * PubSub for light client. * Fixing tests. * Updating to proper jsonrpc version. * Update to correct tests. * Fixing tests. 
--- Cargo.lock | 38 +++++-- ethcore/light/src/client/mod.rs | 25 ++++- ethcore/src/client/chain_notify.rs | 6 +- parity/cli/mod.rs | 6 +- parity/cli/usage.txt | 2 +- parity/rpc.rs | 50 +-------- parity/rpc_apis.rs | 33 ++++-- parity/signer.rs | 2 +- rpc/src/v1/impls/eth_pubsub.rs | 153 ++++++++++++++++++++++++++ rpc/src/v1/impls/mod.rs | 2 + rpc/src/v1/impls/parity.rs | 22 +--- rpc/src/v1/mod.rs | 2 +- rpc/src/v1/tests/mocked/eth_pubsub.rs | 104 +++++++++++++++++ rpc/src/v1/tests/mocked/mod.rs | 1 + rpc/src/v1/traits/eth_pubsub.rs | 42 +++++++ rpc/src/v1/traits/mod.rs | 2 + rpc/src/v1/types/block.rs | 37 ++++++- rpc/src/v1/types/filter.rs | 4 +- rpc/src/v1/types/mod.rs | 2 + rpc/src/v1/types/pubsub.rs | 114 +++++++++++++++++++ 20 files changed, 551 insertions(+), 96 deletions(-) create mode 100644 rpc/src/v1/impls/eth_pubsub.rs create mode 100644 rpc/src/v1/tests/mocked/eth_pubsub.rs create mode 100644 rpc/src/v1/traits/eth_pubsub.rs create mode 100644 rpc/src/v1/types/pubsub.rs diff --git a/Cargo.lock b/Cargo.lock index 224b84439..342dee62e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -836,6 +836,11 @@ dependencies = [ "miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "fnv" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "futures" version = "0.1.11" @@ -873,6 +878,18 @@ name = "glob" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "globset" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hamming" version = "0.1.3" @@ -1040,7 +1057,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1052,7 +1069,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1065,7 +1082,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1078,7 +1095,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1088,7 +1105,7 @@ dependencies = [ [[package]] name = "jsonrpc-minihttp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1102,7 +1119,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1112,8 +1129,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ + "globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1123,7 +1141,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1137,7 +1155,7 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#900b528213ffd1aaaefd29e2b99dfab892b15ab4" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -2840,11 +2858,13 @@ dependencies = [ "checksum ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5d237300af825a8d78f4c0dc835b0eab76a207e9df4aa088d91e162a173e0ca0" "checksum fdlimit 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" +"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" "checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" "checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82" "checksum gcc 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)" = "c07c758b972368e703a562686adb39125707cc1ef3399da8c019fc6c2498a75d" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90d069fe6beb9be359ef505650b3f73228c5591a3c4b1f32be2f4f44459ffa3a" "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1" "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c" "checksum heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f807d2f64cc044a6bcf250ff23e59be0deec7a16612c014f962a06fa7e020f9" diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 7e6213273..57cd61cec 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -16,7 +16,7 @@ //! Light client implementation. Stores data from light sync -use std::sync::Arc; +use std::sync::{Weak, Arc}; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; @@ -111,6 +111,12 @@ pub trait LightChainClient: Send + Sync { fn eip86_transition(&self) -> u64; } +/// An actor listening to light chain events. +pub trait LightChainNotify: Send + Sync { + /// Notifies about imported headers. + fn new_headers(&self, good: &[H256]); +} + /// Something which can be treated as a `LightChainClient`. pub trait AsLightClient { /// The kind of light client this can be treated as. @@ -134,6 +140,7 @@ pub struct Client { report: RwLock, import_lock: Mutex<()>, db: Arc, + listeners: RwLock>>, } impl Client { @@ -148,9 +155,15 @@ impl Client { report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), db: db, + listeners: RwLock::new(vec![]), }) } + /// Adds a new `LightChainNotify` listener. + pub fn add_listener(&self, listener: Weak) { + self.listeners.write().push(listener); + } + /// Create a new `Client` backed purely in-memory. /// This will ignore all database options in the configuration. pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel, cache: Arc>) -> Self { @@ -272,6 +285,8 @@ impl Client { self.queue.mark_as_bad(&bad); self.queue.mark_as_good(&good); + + self.notify(|listener| listener.new_headers(&good)); } /// Get a report about blocks imported. 
@@ -327,6 +342,14 @@ impl Client { Arc::new(v) } + + fn notify(&self, f: F) { + for listener in &*self.listeners.read() { + if let Some(listener) = listener.upgrade() { + f(&*listener) + } + } + } } impl LightChainClient for Client { diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 01f32edf7..0a9bff8d7 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -21,7 +21,8 @@ use util::{H256, Bytes}; #[ipc] pub trait ChainNotify : Send + Sync { /// fires when chain has new blocks. - fn new_blocks(&self, + fn new_blocks( + &self, _imported: Vec, _invalid: Vec, _enacted: Vec, @@ -29,7 +30,8 @@ pub trait ChainNotify : Send + Sync { _sealed: Vec, // Block bytes. _proposed: Vec, - _duration: u64) { + _duration: u64, + ) { // does nothing by default } diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index efd618ffb..45285fa4a 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -172,7 +172,7 @@ usage! { or |c: &Config| otry!(c.rpc).interface.clone(), flag_jsonrpc_cors: Option = None, or |c: &Config| otry!(c.rpc).cors.clone().map(Some), - flag_jsonrpc_apis: String = "web3,eth,net,parity,traces,rpc,secretstore", + flag_jsonrpc_apis: String = "web3,eth,pubsub,net,parity,traces,rpc,secretstore", or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), flag_jsonrpc_hosts: String = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), @@ -186,7 +186,7 @@ usage! { or |c: &Config| otry!(c.websockets).port.clone(), flag_ws_interface: String = "local", or |c: &Config| otry!(c.websockets).interface.clone(), - flag_ws_apis: String = "web3,eth,net,parity,traces,rpc,secretstore", + flag_ws_apis: String = "web3,eth,pubsub,net,parity,traces,rpc,secretstore", or |c: &Config| otry!(c.websockets).apis.as_ref().map(|vec| vec.join(",")), flag_ws_origins: String = "none", or |c: &Config| otry!(c.websockets).origins.as_ref().map(|vec| vec.join(",")), @@ -198,7 +198,7 @@ usage! { or |c: &Config| otry!(c.ipc).disable.clone(), flag_ipc_path: String = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| otry!(c.ipc).path.clone(), - flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc,secretstore", + flag_ipc_apis: String = "web3,eth,pubsub,net,parity,parity_accounts,traces,rpc,secretstore", or |c: &Config| otry!(c.ipc).apis.as_ref().map(|vec| vec.join(",")), // DAPPS diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index bd5ed1f84..34352450a 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -180,7 +180,7 @@ API and Console Options: all (all interfaces) or local (default: {flag_ws_interface}). --ws-apis APIS Specify the APIs available through the WebSockets interface. APIS is a comma-delimited list of API - name. Possible name are web3, eth, net, personal, + name. Possible name are web3, eth, pubsub, net, personal, parity, parity_set, traces, rpc, parity_accounts. (default: {flag_ws_apis}). --ws-origins URL Specify Origin header values allowed to connect. 
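With the defaults above, pubsub is now part of the default API list for the JSON-RPC, WebSockets and IPC transports. Spelling the WebSockets list out explicitly would look something like the following (values copied from the new defaults; the invocation is illustrative only, since passing the default is redundant):

    parity --ws-apis web3,eth,pubsub,net,parity,traces,rpc,secretstore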
diff --git a/parity/rpc.rs b/parity/rpc.rs index eb8f5c279..ae0e08858 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -21,7 +21,7 @@ use dapps; use parity_rpc::informant::{RpcStats, Middleware}; use parity_rpc::{self as rpc, HttpServerError, Metadata, Origin, DomainsValidation}; use helpers::parity_ipc_path; -use jsonrpc_core::{futures, MetaIoHandler}; +use jsonrpc_core::MetaIoHandler; use parity_reactor::TokioRemote; use rpc_apis::{self, ApiSet}; @@ -129,53 +129,13 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -struct Sender(rpc::ws::ws::Sender, futures::sync::mpsc::Receiver); - -impl futures::Future for Sender { - type Item = (); - type Error = (); - - fn poll(&mut self) -> futures::Poll { - use self::futures::Stream; - - let item = self.1.poll()?; - match item { - futures::Async::NotReady => { - Ok(futures::Async::NotReady) - }, - futures::Async::Ready(None) => { - Ok(futures::Async::Ready(())) - }, - futures::Async::Ready(Some(val)) => { - if let Err(e) = self.0.send(val) { - warn!("Error sending a subscription update: {:?}", e); - } - self.poll() - }, - } - } -} - -struct WsRpcExtractor { - remote: TokioRemote, -} - -impl WsRpcExtractor { - fn wrap_out(&self, out: rpc::ws::ws::Sender) -> futures::sync::mpsc::Sender { - let (sender, receiver) = futures::sync::mpsc::channel(8); - self.remote.spawn(move |_| Sender(out, receiver)); - sender - } -} - +struct WsRpcExtractor; impl rpc::ws::MetaExtractor for WsRpcExtractor { fn extract(&self, req: &rpc::ws::RequestContext) -> Metadata { let mut metadata = Metadata::default(); let id = req.session_id as u64; metadata.origin = Origin::Ws(id.into()); - metadata.session = Some(Arc::new(rpc::PubSubSession::new( - self.wrap_out(req.out.clone()) - ))); + metadata.session = Some(Arc::new(rpc::PubSubSession::new(req.sender()))); metadata } } @@ -221,9 +181,7 @@ pub fn new_ws( remote.clone(), allowed_origins, allowed_hosts, - WsRpcExtractor { - remote: remote, - }, + WsRpcExtractor, WsStats { stats: deps.stats.clone(), }, diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 8b89c179f..d738d1eee 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -18,7 +18,7 @@ use std::cmp::PartialEq; use std::collections::BTreeMap; use std::collections::HashSet; use std::str::FromStr; -use std::sync::Arc; +use std::sync::{Arc, Weak}; pub use parity_rpc::SignerService; @@ -46,6 +46,8 @@ pub enum Api { Net, /// Eth (Safe) Eth, + /// Eth Pub-Sub (Safe) + EthPubSub, /// Geth-compatible "personal" API (DEPRECATED; only used in `--geth` mode.) 
Personal, /// Signer - Confirm transactions in Signer (UNSAFE: Passwords, List of transactions) @@ -74,6 +76,7 @@ impl FromStr for Api { "web3" => Ok(Web3), "net" => Ok(Net), "eth" => Ok(Eth), + "pubsub" => Ok(EthPubSub), "personal" => Ok(Personal), "signer" => Ok(Signer), "parity" => Ok(Parity), @@ -153,6 +156,7 @@ fn to_modules(apis: &[Api]) -> BTreeMap { Api::Web3 => ("web3", "1.0"), Api::Net => ("net", "1.0"), Api::Eth => ("eth", "1.0"), + Api::EthPubSub => ("pubsub", "1.0"), Api::Personal => ("personal", "1.0"), Api::Signer => ("signer", "1.0"), Api::Parity => ("parity", "1.0"), @@ -254,6 +258,11 @@ impl FullDependencies { add_signing_methods!(EthSigning, handler, self); } }, + Api::EthPubSub => { + let client = EthPubSubClient::new(self.client.clone(), self.remote.clone()); + self.client.add_notify(client.handler()); + handler.extend_with(client.to_delegate()); + }, Api::Personal => { handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); }, @@ -410,6 +419,13 @@ impl Dependencies for LightDependencies { handler.extend_with(EthFilter::to_delegate(client)); add_signing_methods!(EthSigning, handler, self); }, + Api::EthPubSub => { + let client = EthPubSubClient::new(self.client.clone(), self.remote.clone()); + self.client.add_listener( + Arc::downgrade(&client.handler()) as Weak<::light::client::LightChainNotify> + ); + handler.extend_with(EthPubSub::to_delegate(client)); + }, Api::Personal => { let secret_store = Some(self.secret_store.clone()); handler.extend_with(PersonalClient::new(&secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); @@ -471,7 +487,7 @@ impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut public_list = vec![ - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Rpc, Api::SecretStore, ].into_iter().collect(); match *self { ApiSet::List(ref apis) => apis.clone(), @@ -522,6 +538,7 @@ mod test { assert_eq!(Api::Web3, "web3".parse().unwrap()); assert_eq!(Api::Net, "net".parse().unwrap()); assert_eq!(Api::Eth, "eth".parse().unwrap()); + assert_eq!(Api::EthPubSub, "pubsub".parse().unwrap()); assert_eq!(Api::Personal, "personal".parse().unwrap()); assert_eq!(Api::Signer, "signer".parse().unwrap()); assert_eq!(Api::Parity, "parity".parse().unwrap()); @@ -547,7 +564,7 @@ mod test { fn test_api_set_unsafe_context() { let expected = vec![ // make sure this list contains only SAFE methods - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore ].into_iter().collect(); assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); } @@ -556,7 +573,7 @@ mod test { fn test_api_set_ipc_context() { let expected = vec![ // safe - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, // semi-safe Api::ParityAccounts ].into_iter().collect(); @@ -567,7 +584,7 @@ mod test { fn test_api_set_safe_context() { let expected = vec![ // safe - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, // semi-safe Api::ParityAccounts, // Unsafe @@ -579,7 +596,7 @@ mod test { #[test] fn test_all_apis() { 
assert_eq!("all".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, Api::ParityAccounts, Api::ParitySet, Api::Signer, Api::Personal @@ -589,7 +606,7 @@ mod test { #[test] fn test_all_without_personal_apis() { assert_eq!("personal,all,-personal".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, Api::ParityAccounts, Api::ParitySet, Api::Signer, ].into_iter().collect())); @@ -598,7 +615,7 @@ mod test { #[test] fn test_safe_parsing() { assert_eq!("safe".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, ].into_iter().collect())); } } diff --git a/parity/signer.rs b/parity/signer.rs index 1ab53ea69..7f800f0e0 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -101,7 +101,7 @@ pub fn execute(cmd: Configuration) -> Result { } pub fn generate_token_and_url(conf: &Configuration) -> Result { - let code = generate_new_token(conf.signer_path.clone()).map_err(|err| format!("Error generating token: {:?}", err))?; + let code = generate_new_token(conf.signer_path.clone()).map_err(|err| format!("Error generating token: {}", err))?; let auth_url = format!("http://{}:{}/#/auth?token={}", conf.interface, conf.port, code); // And print in to the console Ok(NewToken { diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs new file mode 100644 index 000000000..202f2592a --- /dev/null +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -0,0 +1,153 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Eth PUB-SUB rpc implementation. + +use std::sync::Arc; +use std::collections::BTreeMap; + +use futures::{self, BoxFuture, Future}; +use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; +use jsonrpc_macros::pubsub::{Sink, Subscriber}; +use jsonrpc_pubsub::SubscriptionId; + +use v1::helpers::{errors, Subscribers}; +use v1::metadata::Metadata; +use v1::traits::EthPubSub; +use v1::types::{pubsub, RichHeader}; + +use ethcore::encoded; +use ethcore::client::{BlockChainClient, ChainNotify, BlockId}; +use light::client::{LightChainClient, LightChainNotify}; +use parity_reactor::Remote; +use util::{Mutex, H256, Bytes}; + +/// Eth PubSub implementation. +pub struct EthPubSubClient { + handler: Arc>, + heads_subscribers: Arc>>>, +} + +impl EthPubSubClient { + /// Creates new `EthPubSubClient`. 
+ pub fn new(client: Arc, remote: Remote) -> Self { + let heads_subscribers = Arc::new(Mutex::new(Subscribers::default())); + EthPubSubClient { + handler: Arc::new(ChainNotificationHandler { + client: client, + remote: remote, + heads_subscribers: heads_subscribers.clone(), + }), + heads_subscribers: heads_subscribers, + } + } + + /// Returns a chain notification handler. + pub fn handler(&self) -> Arc> { + self.handler.clone() + } +} + +/// PubSub Notification handler. +pub struct ChainNotificationHandler { + client: Arc, + remote: Remote, + heads_subscribers: Arc>>>, +} + +impl ChainNotificationHandler { + fn notify(&self, blocks: Vec<(encoded::Header, BTreeMap)>) { + for subscriber in self.heads_subscribers.lock().values() { + for &(ref block, ref extra_info) in &blocks { + self.remote.spawn(subscriber + .notify(pubsub::Result::Header(RichHeader { + inner: block.into(), + extra_info: extra_info.clone(), + })) + .map(|_| ()) + .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) + ); + } + } + } +} + +impl LightChainNotify for ChainNotificationHandler { + fn new_headers( + &self, + headers: &[H256], + ) { + let blocks = headers + .iter() + .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) + .map(|header| (header, Default::default())) + .collect(); + + self.notify(blocks); + } +} + +impl ChainNotify for ChainNotificationHandler { + fn new_blocks( + &self, + _imported: Vec, + _invalid: Vec, + enacted: Vec, + _retracted: Vec, + _sealed: Vec, + // Block bytes. + _proposed: Vec, + _duration: u64, + ) { + const EXTRA_INFO_PROOF: &'static str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed"; + let blocks = enacted + .into_iter() + .filter_map(|hash| self.client.block_header(BlockId::Hash(hash))) + .map(|header| { + let hash = header.hash(); + (header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF)) + }) + .collect(); + self.notify(blocks); + } +} + +impl EthPubSub for EthPubSubClient { + type Metadata = Metadata; + + fn subscribe( + &self, + _meta: Metadata, + subscriber: Subscriber, + kind: pubsub::Kind, + params: Trailing, + ) { + match (kind, params.0) { + (pubsub::Kind::NewHeads, pubsub::Params::None) => { + self.heads_subscribers.lock().push(subscriber) + }, + _ => { + let _ = subscriber.reject(errors::unimplemented(None)); + }, + } + } + + fn unsubscribe(&self, id: SubscriptionId) -> BoxFuture { + let res = self.heads_subscribers.lock().remove(&id).is_some(); + futures::future::ok(res).boxed() + } +} diff --git a/rpc/src/v1/impls/mod.rs b/rpc/src/v1/impls/mod.rs index a8691b32b..d3e0554c2 100644 --- a/rpc/src/v1/impls/mod.rs +++ b/rpc/src/v1/impls/mod.rs @@ -18,6 +18,7 @@ mod eth; mod eth_filter; +mod eth_pubsub; mod net; mod parity; mod parity_accounts; @@ -36,6 +37,7 @@ pub mod light; pub use self::eth::{EthClient, EthClientOptions}; pub use self::eth_filter::EthFilterClient; +pub use self::eth_pubsub::EthPubSubClient; pub use self::net::NetClient; pub use self::parity::ParityClient; pub use self::parity_accounts::ParityAccountsClient; diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 9c3673ee4..ae91af54c 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -48,7 +48,7 @@ use v1::types::{ TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, OperationsInfo, DappId, ChainStatus, - AccountInfo, HwAccountInfo, Header, RichHeader + AccountInfo, HwAccountInfo, RichHeader }; /// Parity 
implementation. @@ -411,25 +411,7 @@ impl Parity for ParityClient where }; future::ok(RichHeader { - inner: Header { - hash: Some(encoded.hash().into()), - size: Some(encoded.rlp().as_raw().len().into()), - parent_hash: encoded.parent_hash().into(), - uncles_hash: encoded.uncles_hash().into(), - author: encoded.author().into(), - miner: encoded.author().into(), - state_root: encoded.state_root().into(), - transactions_root: encoded.transactions_root().into(), - receipts_root: encoded.receipts_root().into(), - number: Some(encoded.number().into()), - gas_used: encoded.gas_used().into(), - gas_limit: encoded.gas_limit().into(), - logs_bloom: encoded.log_bloom().into(), - timestamp: encoded.timestamp().into(), - difficulty: encoded.difficulty().into(), - seal_fields: encoded.seal().into_iter().map(Into::into).collect(), - extra_data: Bytes::new(encoded.extra_data()), - }, + inner: encoded.into(), extra_info: client.block_extra_info(id).expect(EXTRA_INFO_PROOF), }).boxed() } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 59aef84b3..bd8675196 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -58,7 +58,7 @@ pub mod traits; pub mod tests; pub mod types; -pub use self::traits::{Web3, Eth, EthFilter, EthSigning, Net, Parity, ParityAccounts, ParitySet, ParitySigning, PubSub, Signer, Personal, Traces, Rpc, SecretStore}; +pub use self::traits::{Web3, Eth, EthFilter, EthPubSub, EthSigning, Net, Parity, ParityAccounts, ParitySet, ParitySigning, PubSub, Signer, Personal, Traces, Rpc, SecretStore}; pub use self::impls::*; pub use self::helpers::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, block_import, informant, dispatch}; pub use self::metadata::Metadata; diff --git a/rpc/src/v1/tests/mocked/eth_pubsub.rs b/rpc/src/v1/tests/mocked/eth_pubsub.rs new file mode 100644 index 000000000..ae1165068 --- /dev/null +++ b/rpc/src/v1/tests/mocked/eth_pubsub.rs @@ -0,0 +1,104 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; + +use jsonrpc_core::MetaIoHandler; +use jsonrpc_core::futures::{self, Stream, Future}; +use jsonrpc_pubsub::Session; + +use v1::{EthPubSub, EthPubSubClient, Metadata}; + +use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify}; +use parity_reactor::EventLoop; + +#[test] +fn should_subscribe_to_new_heads() { + // given + let el = EventLoop::spawn(); + let mut client = TestBlockChainClient::new(); + // Insert some blocks + client.add_blocks(3, EachBlockWith::Nothing); + let h3 = client.block_hash_delta_minus(1); + let h2 = client.block_hash_delta_minus(2); + let h1 = client.block_hash_delta_minus(3); + + let pubsub = EthPubSubClient::new(Arc::new(client), el.remote()); + let handler = pubsub.handler(); + let pubsub = pubsub.to_delegate(); + + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); + + let mut metadata = Metadata::default(); + let (sender, receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); + + // Subscribe + let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newHeads"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":1,"id":1}"#; + assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + + // Check notifications + handler.new_blocks(vec![], vec![], vec![h1], vec![], vec![], vec![], 0); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x1","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x1","parentHash":"0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":1}}"#; + assert_eq!(res, Some(response.into())); + + // Notify about two blocks + handler.new_blocks(vec![], vec![], vec![h2, h3], vec![], vec![], vec![], 0); + + // Receive both + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = 
r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x2","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x2","parentHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":1}}"#; + assert_eq!(res, Some(response.into())); + let (res, receiver) = receiver.into_future().wait().unwrap(); + let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x3","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0xdf04a98bb0c6fa8441bd429822f65a46d0cb553f6bcef602b973e65c81497f8e","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x3","parentHash":"0x44e5ecf454ea99af9d8a8f2ca0daba96964c90de05db7a78f59b84ae9e749706","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":1}}"#; + assert_eq!(res, Some(response.into())); + + // And unsubscribe + let request = r#"{"jsonrpc": "2.0", "method": "eth_unsubscribe", "params": [1], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(io.handle_request_sync(request, metadata), Some(response.to_owned())); + + let (res, _receiver) = receiver.into_future().wait().unwrap(); + assert_eq!(res, None); +} + +#[test] +fn should_return_unimplemented() { + // given + let el = EventLoop::spawn(); + let client = TestBlockChainClient::new(); + let pubsub = EthPubSubClient::new(Arc::new(client), el.remote()); + let pubsub = pubsub.to_delegate(); + + let mut io = MetaIoHandler::default(); + io.extend_with(pubsub); + + let 
mut metadata = Metadata::default(); + let (sender, _receiver) = futures::sync::mpsc::channel(8); + metadata.session = Some(Arc::new(Session::new(sender))); + + // Subscribe + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"This request is not implemented yet. Please create an issue on Github repo."},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["newPendingTransactions"], "id": 1}"#; + assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["logs"], "id": 1}"#; + assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); + let request = r#"{"jsonrpc": "2.0", "method": "eth_subscribe", "params": ["syncing"], "id": 1}"#; + assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/mocked/mod.rs b/rpc/src/v1/tests/mocked/mod.rs index e5a459633..ae51c2be6 100644 --- a/rpc/src/v1/tests/mocked/mod.rs +++ b/rpc/src/v1/tests/mocked/mod.rs @@ -18,6 +18,7 @@ //! method calls properly. mod eth; +mod eth_pubsub; mod manage_network; mod net; mod parity; diff --git a/rpc/src/v1/traits/eth_pubsub.rs b/rpc/src/v1/traits/eth_pubsub.rs new file mode 100644 index 000000000..794d12768 --- /dev/null +++ b/rpc/src/v1/traits/eth_pubsub.rs @@ -0,0 +1,42 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Eth PUB-SUB rpc interface. + +use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; +use jsonrpc_macros::pubsub::Subscriber; +use jsonrpc_pubsub::SubscriptionId; +use futures::BoxFuture; + +use v1::types::pubsub; + +build_rpc_trait! { + /// Eth PUB-SUB rpc interface. + pub trait EthPubSub { + type Metadata; + + #[pubsub(name = "eth_subscription")] { + /// Subscribe to Eth subscription. + #[rpc(name = "eth_subscribe")] + fn subscribe(&self, Self::Metadata, Subscriber, pubsub::Kind, Trailing); + + /// Unsubscribe from existing Eth subscription. 
+ #[rpc(name = "eth_unsubscribe")] + fn unsubscribe(&self, SubscriptionId) -> BoxFuture; + } + } +} diff --git a/rpc/src/v1/traits/mod.rs b/rpc/src/v1/traits/mod.rs index 9cef58ee7..528463a4a 100644 --- a/rpc/src/v1/traits/mod.rs +++ b/rpc/src/v1/traits/mod.rs @@ -18,6 +18,7 @@ pub mod web3; pub mod eth; +pub mod eth_pubsub; pub mod eth_signing; pub mod net; pub mod parity; @@ -33,6 +34,7 @@ pub mod secretstore; pub use self::web3::Web3; pub use self::eth::{Eth, EthFilter}; +pub use self::eth_pubsub::EthPubSub; pub use self::eth_signing::EthSigning; pub use self::net::Net; pub use self::parity::Parity; diff --git a/rpc/src/v1/types/block.rs b/rpc/src/v1/types/block.rs index 4077d7221..50a081356 100644 --- a/rpc/src/v1/types/block.rs +++ b/rpc/src/v1/types/block.rs @@ -16,6 +16,9 @@ use std::ops::Deref; use std::collections::BTreeMap; + +use ethcore::encoded::Header as EthHeader; + use serde::{Serialize, Serializer}; use serde::ser::Error; use v1::types::{Bytes, Transaction, H160, H256, H2048, U256}; @@ -97,7 +100,7 @@ pub struct Block { } /// Block header representation. -#[derive(Debug, Serialize)] +#[derive(Debug, Serialize, PartialEq, Eq)] pub struct Header { /// Hash of the block pub hash: Option, @@ -146,6 +149,36 @@ pub struct Header { pub size: Option, } +impl From for Header { + fn from(h: EthHeader) -> Self { + (&h).into() + } +} + +impl<'a> From<&'a EthHeader> for Header { + fn from(h: &'a EthHeader) -> Self { + Header { + hash: Some(h.hash().into()), + size: Some(h.rlp().as_raw().len().into()), + parent_hash: h.parent_hash().into(), + uncles_hash: h.uncles_hash().into(), + author: h.author().into(), + miner: h.author().into(), + state_root: h.state_root().into(), + transactions_root: h.transactions_root().into(), + receipts_root: h.receipts_root().into(), + number: Some(h.number().into()), + gas_used: h.gas_used().into(), + gas_limit: h.gas_limit().into(), + logs_bloom: h.log_bloom().into(), + timestamp: h.timestamp().into(), + difficulty: h.difficulty().into(), + seal_fields: h.seal().into_iter().map(Into::into).collect(), + extra_data: h.extra_data().into(), + } + } +} + /// Block representation with additional info. pub type RichBlock = Rich; @@ -153,7 +186,7 @@ pub type RichBlock = Rich; pub type RichHeader = Rich
; /// Value representation with additional info -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct Rich { /// Standard value. pub inner: T, diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index 8ccac7efd..cd1d43fcb 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -22,7 +22,7 @@ use ethcore::client::BlockId; use v1::types::{BlockNumber, H160, H256, Log}; /// Variadic value -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Hash)] pub enum VariadicValue where T: Deserialize { /// Single Single(T), @@ -53,7 +53,7 @@ pub type FilterAddress = VariadicValue; pub type Topic = VariadicValue; /// Filter -#[derive(Debug, PartialEq, Clone, Deserialize)] +#[derive(Debug, PartialEq, Clone, Deserialize, Eq, Hash)] #[serde(deny_unknown_fields)] pub struct Filter { /// From Block diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index 7d0ae0541..97f2eb2ae 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -43,6 +43,8 @@ mod transaction_condition; mod uint; mod work; +pub mod pubsub; + pub use self::account_info::{AccountInfo, HwAccountInfo}; pub use self::bytes::Bytes; pub use self::block::{RichBlock, Block, BlockTransactions, Header, RichHeader, Rich}; diff --git a/rpc/src/v1/types/pubsub.rs b/rpc/src/v1/types/pubsub.rs new file mode 100644 index 000000000..8bc4f9079 --- /dev/null +++ b/rpc/src/v1/types/pubsub.rs @@ -0,0 +1,114 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Pub-Sub types. + +use serde::{Serialize, Serializer}; +use v1::types::{RichHeader, Filter}; + +/// Subscription result. +#[derive(Debug, PartialEq, Eq)] +pub enum Result { + /// New block header. + Header(RichHeader), +} + +impl Serialize for Result { + fn serialize(&self, serializer: S) -> ::std::result::Result + where S: Serializer + { + match *self { + Result::Header(ref header) => header.serialize(serializer), + } + } +} + +/// Subscription kind. +#[derive(Debug, Deserialize, PartialEq, Eq, Hash, Clone)] +#[serde(deny_unknown_fields)] +pub enum Kind { + /// New block headers subscription. + #[serde(rename="newHeads")] + NewHeads, + /// Logs subscription. + #[serde(rename="logs")] + Logs, + /// New Pending Transactions subscription. + #[serde(rename="newPendingTransactions")] + NewPendingTransactions, + /// Node syncing status subscription. + #[serde(rename="syncing")] + Syncing, +} + +/// Subscription kind. +#[derive(Debug, Deserialize, PartialEq, Eq, Hash, Clone)] +#[serde(deny_unknown_fields)] +pub enum Params { + /// No parameters passed. + None, + /// Log parameters. 
+ Logs(Filter), +} + +impl Default for Params { + fn default() -> Self { + Params::None + } +} + +#[cfg(test)] +mod tests { + use serde_json; + use super::{Result, Kind}; + use v1::types::{RichHeader, Header}; + + #[test] + fn should_deserialize_kind() { + assert_eq!(serde_json::from_str::(r#""newHeads""#).unwrap(), Kind::NewHeads); + assert_eq!(serde_json::from_str::(r#""logs""#).unwrap(), Kind::Logs); + assert_eq!(serde_json::from_str::(r#""newPendingTransactions""#).unwrap(), Kind::NewPendingTransactions); + assert_eq!(serde_json::from_str::(r#""syncing""#).unwrap(), Kind::Syncing); + } + + #[test] + fn should_serialize_header() { + let header = Result::Header(RichHeader { + extra_info: Default::default(), + inner: Header { + hash: Some(Default::default()), + parent_hash: Default::default(), + uncles_hash: Default::default(), + author: Default::default(), + miner: Default::default(), + state_root: Default::default(), + transactions_root: Default::default(), + receipts_root: Default::default(), + number: Some(Default::default()), + gas_used: Default::default(), + gas_limit: Default::default(), + extra_data: Default::default(), + logs_bloom: Default::default(), + timestamp: Default::default(), + difficulty: Default::default(), + seal_fields: vec![Default::default(), Default::default()], + size: Some(69.into()), + }, + }); + let expected = r#"{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","sealFields":["0x","0x"],"sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000","size":"0x45","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000"}"#; + assert_eq!(serde_json::to_string(&header).unwrap(), expected); + } +} From a8d99ae4654526b155ca8c857eaf2fc3a4af6043 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Tue, 23 May 2017 12:28:22 +0200 Subject: [PATCH 17/29] use cargo workspace (#5601) * use cargo workspace * removed profiles for non root packages --- Cargo.lock | 161 ++++++++++++++++++++++-------------- Cargo.toml | 2 + ipc-common-types/Cargo.toml | 4 - logger/Cargo.toml | 4 - secret_store/Cargo.toml | 3 - stratum/Cargo.toml | 4 - updater/Cargo.toml | 4 - 7 files changed, 102 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 342dee62e..5b3aad881 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,61 +1,6 @@ [root] -name = "parity" -version = "1.7.0" -dependencies = [ - "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "app_dirs 1.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", - "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 1.7.0", - "ethcore-devtools 1.7.0", - "ethcore-io 1.7.0", - "ethcore-ipc 1.7.0", - "ethcore-ipc-hypervisor 1.2.0", - "ethcore-ipc-nano 1.7.0", - "ethcore-ipc-tests 0.1.0", - "ethcore-light 1.7.0", - "ethcore-logger 1.7.0", - "ethcore-secretstore 1.0.0", - "ethcore-signer 1.7.0", - "ethcore-stratum 1.7.0", - "ethcore-util 1.7.0", - "ethkey 0.2.0", - "ethsync 1.7.0", - "evmbin 0.1.0", - "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-dapps 1.7.0", - "parity-hash-fetch 1.7.0", - "parity-ipfs-api 1.7.0", - "parity-local-store 0.1.0", - "parity-reactor 0.1.0", - "parity-rpc 1.7.0", - "parity-rpc-client 1.4.0", - "parity-updater 1.7.0", - "path 0.1.0", - "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rlp 0.2.0", - "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rpc-cli 1.4.0", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] +name = "using_queue" +version = "0.1.0" [[package]] name = "advapi32-sys" @@ -224,6 +169,14 @@ dependencies = [ "multihash 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clippy" +version = "0.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "clippy" version = "0.0.103" @@ -232,6 +185,20 @@ dependencies = [ "clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clippy_lints" +version = "0.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 
(registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "clippy_lints" version = "0.0.103" @@ -488,8 +455,10 @@ name = "ethcore-ipc-codegen" version = "1.7.0" dependencies = [ "aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quasi_macros 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_syntax 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -573,6 +542,7 @@ version = "1.7.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-logger 1.7.0", @@ -717,6 +687,7 @@ dependencies = [ name = "ethjson" version = "0.1.0" dependencies = [ + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -819,6 +790,7 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -905,6 +877,7 @@ dependencies = [ "libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1601,6 +1574,65 @@ dependencies = [ "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity" +version = "1.7.0" +dependencies = [ + "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", + "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore 1.7.0", + "ethcore-devtools 1.7.0", + "ethcore-io 1.7.0", + "ethcore-ipc 1.7.0", + "ethcore-ipc-hypervisor 1.2.0", + "ethcore-ipc-nano 1.7.0", + "ethcore-ipc-tests 0.1.0", + "ethcore-light 1.7.0", + "ethcore-logger 1.7.0", + "ethcore-secretstore 1.0.0", + "ethcore-signer 1.7.0", + "ethcore-stratum 1.7.0", + "ethcore-util 1.7.0", + "ethkey 0.2.0", + "ethsync 1.7.0", + "evmbin 0.1.0", + "fdlimit 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps 1.7.0", + "parity-hash-fetch 1.7.0", + "parity-ipfs-api 1.7.0", + "parity-local-store 0.1.0", + "parity-reactor 0.1.0", + "parity-rpc 1.7.0", + "parity-rpc-client 1.4.0", + "parity-updater 1.7.0", + "path 0.1.0", + "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.2.0", + "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rpc-cli 1.4.0", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-dapps" version = "1.7.0" @@ -1975,6 +2007,14 @@ dependencies = [ "syntex_syntax 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "quasi_macros" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "quick-error" version = "1.1.0" @@ -2699,10 +2739,6 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "using_queue" -version = "0.1.0" - [[package]] name = "utf8-ranges" version = "1.0.0" @@ -2838,7 +2874,9 @@ dependencies = [ "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e53e6cdfa5ca294863e8c8a32a7cdb4dc0a442c8971d47a0e75b6c27ea268a6a" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" +"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b" "checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a" +"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96" "checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591" "checksum core-foundation 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "20a6d0448d3a99d977ae4a2aa5a98d886a923e863e81ad9ff814645b6feb3bbd" "checksum core-foundation-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05eed248dc504a5391c63794fe4fb64f46f071280afaa1b73308f3c0ce4574c5" @@ -2957,6 +2995,7 @@ dependencies = [ "checksum primal-sieve 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "7aa73fd87e5984a00bdb4c1b14d3d5d6d0bad01b2caaaf924c16ab7260ac946c" "checksum quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18c45c4854d6d1cf5d531db97c75880feb91c958b0720f4ec1057135fec358b3" "checksum quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9e25fa23c044c1803f43ca59c98dac608976dd04ce799411edd58ece776d4" +"checksum quasi_macros 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29cec87bc2816766d7e4168302d505dd06b0a825aed41b00633d296e922e02dd" "checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c" "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a" "checksum quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "6732e32663c9c271bfc7c1823486b471f18c47a2dbf87c066897b7b51afc83be" diff --git a/Cargo.toml b/Cargo.toml index 37effabb5..c0eccff28 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,3 +104,5 @@ name = "parity" debug = false lto = false panic = "abort" + +[workspace] diff --git a/ipc-common-types/Cargo.toml b/ipc-common-types/Cargo.toml index 715330334..f74f25e1e 100644 --- a/ipc-common-types/Cargo.toml +++ b/ipc-common-types/Cargo.toml @@ -13,7 +13,3 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } semver = "0.6" ethcore-ipc = { path = "../ipc/rpc" } ethcore-util = { path = "../util" } - -[profile.release] -debug = true -lto = false diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 8d75b0516..84633ad31 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -15,7 +15,3 @@ time = "0.1" parking_lot = "0.4" arrayvec = "0.3" ansi_term = "0.9" - -[profile.release] -debug = true -lto = false diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index b087358be..d28209904 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -35,6 +35,3 @@ ethcore-logger = { path = "../logger" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } native-contracts = { path = "../ethcore/native_contracts" } - -[profile.release] -debug = true diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 3bcf74f27..d9b6ed1b1 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -24,7 +24,3 @@ ethcore-ipc-nano = { path = "../ipc/nano" } futures = "0.1" tokio-core = "0.1" ethcore-logger = { path = "../logger" } - -[profile.release] -debug = true -lto = false diff --git a/updater/Cargo.toml b/updater/Cargo.toml index 252549b04..29767cf8b 100644 --- a/updater/Cargo.toml +++ b/updater/Cargo.toml @@ -22,7 +22,3 @@ ipc-common-types = { path = "../ipc-common-types" } ethcore-ipc = { path = "../ipc/rpc" } parity-reactor = { path = "../util/reactor" } path = { path = "../util/path" } - -[profile.release] -debug = true -lto = false From aa41b48ba06d36ab3b1fdda98b9091bc5a4336f8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 23 May 2017 06:31:09 -0400 Subject: [PATCH 18/29] Dynamically adjust PIP request costs based on gathered data (#5603) * 
beginnings of load timer * initial load timer implementation * saturating adds * create flow params from distribution * update request credits and acknowledgement * mark cumulative cost dead code * fix compilation * tests * supply load share and other params to lightprotocol params * add file store * fix ethsync compilation * reshuffle constants --- Cargo.lock | 59 ++++- ethcore/light/Cargo.toml | 3 + ethcore/light/src/lib.rs | 19 +- ethcore/light/src/net/load_timer.rs | 279 +++++++++++++++++++++++ ethcore/light/src/net/mod.rs | 192 ++++++++++++++-- ethcore/light/src/net/request_credits.rs | 76 ++++++ ethcore/light/src/net/request_set.rs | 2 + ethcore/light/src/net/tests/mod.rs | 96 ++++---- ethcore/light/src/types/request/mod.rs | 18 +- sync/src/api.rs | 19 +- sync/src/light_sync/tests/test_net.rs | 7 +- 11 files changed, 691 insertions(+), 79 deletions(-) create mode 100644 ethcore/light/src/net/load_timer.rs diff --git a/Cargo.lock b/Cargo.lock index 5b3aad881..cc02a63e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,6 +79,16 @@ dependencies = [ "rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bincode" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bit-set" version = "0.2.0" @@ -504,6 +514,7 @@ dependencies = [ name = "ethcore-light" version = "1.7.0" dependencies = [ + "bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", @@ -516,6 +527,8 @@ dependencies = [ "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", + "serde 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2282,12 +2295,17 @@ name = "serde" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "serde" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "serde_codegen_internals" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syn 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2297,7 +2315,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen_internals 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive_internals 0.15.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive_internals" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2405,10 +2442,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "syn" -version = "0.11.4" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2859,6 +2905,7 @@ dependencies = [ "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1" "checksum base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9605ba46d61df0410d8ac686b0007add8172eba90e8e909c347856fe794d8c" "checksum bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "5d1b3ef6756498df0e2c6bb67c065f4154d0ecd721eb5b3c3f865c8012b9fd74" +"checksum bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e103c8b299b28a9c6990458b7013dc4a8356a9b854c51b9883241f5866fac36e" "checksum bit-set 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6e1e6fb1c9e3d6fcdec57216a74eaa03e41f52a22f13a16438251d8e88b89da" "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c" "checksum bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5b97c2c8e8bbb4251754f559df8af22fb264853c7d009084a576cdf12565089d" @@ -3025,8 +3072,11 @@ dependencies = [ "checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" "checksum serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0ae9a3c8b07c09dbe43022486d55a18c629a0618d2241e49829aaef9b6d862f9" +"checksum serde 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "991ef6be409a3b7a46cb9ee701d86156ce851825c65dbee7f16dbd5c4e7e2d47" "checksum serde_codegen_internals 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c3172bf2940b975c0e4f6ab42a511c0a4407d4f46ccef87a9d3615db5c26fa96" "checksum serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ecc6e0379ca933ece58302d2d3034443f06fbf38fd535857c1dc516195cbc3bf" +"checksum serde_derive 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9fd81eef9f0b4ec341b11095335b6a4b28ed85581b12dd27585dee1529df35e0" +"checksum serde_derive_internals 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "021c338d22c7e30f957a6ab7e388cb6098499dda9fd4ba1661ee074ca7a180d1" "checksum serde_json 0.9.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "cf37ce931677e98b4fa5e6469aaa3ab4b6228309ea33b1b22d3ec055adfc4515" "checksum serde_urlencoded 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a81f15da4b9780e1524697f73b09076b6e42298ef673bead9ca8f848b334ef84" "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" @@ -3041,7 +3091,8 @@ dependencies = [ "checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf" "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" -"checksum syn 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f94368aae82bb29656c98443a7026ca931a659e8d19dcdc41d6e273054e820" +"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" "checksum syntex 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)" = "35f3cc9d446323ef8fefad933b65cd6de271d29fa14a2e9d036a084770c6d6d5" "checksum syntex_errors 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3af03823ea45d420dd2c1a44bb074e13ea55f9b99afe960fd58eb4069b7f6cad" "checksum syntex_pos 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1e502a4a904d9f37cf975dbdbb0b08f2d111322f6792bda6eb095b4112c9a24b" diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 78210904e..1f8d48ac0 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -24,6 +24,9 @@ smallvec = "0.3.1" futures = "0.1" rand = "0.3" itertools = "0.5" +bincode = "0.8.0" +serde = "1.0" +serde_derive = "1.0" stats = { path = "../../util/stats" } [features] diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 5e970b837..18908a3f2 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -60,20 +60,25 @@ pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; pub use types::request as request; +#[macro_use] +extern crate serde_derive; + #[macro_use] extern crate log; -extern crate ethcore; -extern crate ethcore_util as util; -extern crate ethcore_network as network; +extern crate bincode; extern crate ethcore_io as io; -extern crate rlp; -extern crate smallvec; -extern crate time; +extern crate ethcore_network as network; +extern crate ethcore_util as util; +extern crate ethcore; extern crate futures; -extern crate rand; extern crate itertools; +extern crate rand; +extern crate rlp; +extern crate serde; +extern crate smallvec; extern crate stats; +extern crate time; #[cfg(feature = "ipc")] extern crate ethcore_ipc as ipc; diff --git a/ethcore/light/src/net/load_timer.rs b/ethcore/light/src/net/load_timer.rs new file mode 100644 index 000000000..190747d40 --- /dev/null +++ b/ethcore/light/src/net/load_timer.rs @@ -0,0 +1,279 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Request load timer and distribution manager.
+//!
+//! This uses empirical samples of the length of time taken to respond
+//! to requests in order to inform request credit costs.
+//!
+//! The average request time is determined by an exponential moving average
+//! of the mean request times during the last `MOVING_SAMPLE_SIZE` time periods of
+//! length `TIME_PERIOD_MS`, with the exception that time periods where no data is
+//! gathered are excluded.
+
+use std::collections::{HashMap, VecDeque};
+use std::fs::File;
+use std::path::PathBuf;
+
+use request::{CompleteRequest, Kind};
+
+use bincode;
+use time;
+use util::{Uint, RwLock, Mutex};
+
+/// Number of time periods samples should be kept for.
+pub const MOVING_SAMPLE_SIZE: usize = 256;
+
+/// Stores rolling load timer samples.
+// TODO: switch to bigint if possible (FP casts aren't available)
+pub trait SampleStore: Send + Sync {
+	/// Load samples.
+	fn load(&self) -> HashMap<Kind, VecDeque<u64>>;
+
+	/// Store all samples.
+	fn store(&self, samples: &HashMap<Kind, VecDeque<u64>>);
+}
+
+// get a hardcoded, arbitrarily determined (but intended overestimate)
+// of the time in nanoseconds to serve a request of the given kind.
+//
+// TODO: seed this with empirical data.
+fn hardcoded_serve_time(kind: Kind) -> u64 {
+	match kind {
+		Kind::Headers => 500_000,
+		Kind::HeaderProof => 500_000,
+		Kind::Receipts => 1_000_000,
+		Kind::Body => 1_000_000,
+		Kind::Account => 1_500_000,
+		Kind::Storage => 2_000_000,
+		Kind::Code => 1_500_000,
+		Kind::Execution => 250, // per gas.
+	}
+}
+
+/// A no-op store.
+pub struct NullStore;
+
+impl SampleStore for NullStore {
+	fn load(&self) -> HashMap<Kind, VecDeque<u64>> { HashMap::new() }
+	fn store(&self, _samples: &HashMap<Kind, VecDeque<u64>>) { }
+}
+
+/// Request load distributions.
+pub struct LoadDistribution {
+	active_period: RwLock<HashMap<Kind, Mutex<(u64, u64)>>>,
+	samples: RwLock<HashMap<Kind, VecDeque<u64>>>,
+}
+
+impl LoadDistribution {
+	/// Load rolling samples from the given store.
+	pub fn load(store: &SampleStore) -> Self {
+		let mut samples = store.load();
+
+		for kind_samples in samples.values_mut() {
+			while kind_samples.len() > MOVING_SAMPLE_SIZE {
+				kind_samples.pop_front();
+			}
+		}
+
+		LoadDistribution {
+			active_period: RwLock::new(HashMap::new()),
+			samples: RwLock::new(samples),
+		}
+	}
+
+	/// Begin a timer.
+	pub fn begin_timer<'a>(&'a self, req: &CompleteRequest) -> LoadTimer<'a> {
+		let kind = req.kind();
+		let n = match *req {
+			CompleteRequest::Headers(ref req) => req.max,
+			CompleteRequest::Execution(ref req) => req.gas.low_u64(),
+			_ => 1,
+		};
+
+		LoadTimer {
+			start: time::precise_time_ns(),
+			n: n,
+			dist: self,
+			kind: kind,
+		}
+	}
+
+	/// Calculate EMA of load in nanoseconds for a specific request kind.
+	/// If there is no data for the given request kind, no EMA will be calculated,
+	/// but a hardcoded time will be returned.
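	// Illustrative note (not from the patch): a minimal sketch of the EMA computed by
	// `expected_time_ns` below, assuming alpha = 1 / number_of_samples and folding from
	// the oldest sample forward:
	//
	//     let samples = [1_000u64, 2_000, 4_000];
	//     let alpha = 1f64 / samples.len() as f64;
	//     let ema = samples.iter().skip(1).fold(samples[0] as f64,
	//         |acc, &s| (alpha * s as f64) + ((1.0 - alpha) * acc));
	//     assert_eq!(ema as u64, 2_222); // the arithmetic mean would be 2_333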
+	pub fn expected_time_ns(&self, kind: Kind) -> u64 {
+		let samples = self.samples.read();
+		samples.get(&kind).and_then(|s| {
+			if s.len() == 0 { return None }
+
+			let alpha: f64 = 1f64 / s.len() as f64;
+			let start = s.front().expect("length known to be non-zero; qed").clone();
+			let ema = s.iter().skip(1).fold(start as f64, |a, &c| {
+				(alpha * c as f64) + ((1.0 - alpha) * a)
+			});
+
+			Some(ema as u64)
+		}).unwrap_or_else(move || hardcoded_serve_time(kind))
+	}
+
+	/// End the current time period. Provide a store to which the collected
+	/// samples will be written.
+	pub fn end_period(&self, store: &SampleStore) {
+		let active_period = self.active_period.read();
+		let mut samples = self.samples.write();
+
+		for (&kind, set) in active_period.iter() {
+			let (elapsed, n) = ::std::mem::replace(&mut *set.lock(), (0, 0));
+			if n == 0 { continue }
+
+			let kind_samples = samples.entry(kind)
+				.or_insert_with(|| VecDeque::with_capacity(MOVING_SAMPLE_SIZE));
+
+			if kind_samples.len() == MOVING_SAMPLE_SIZE { kind_samples.pop_front(); }
+			kind_samples.push_back(elapsed / n);
+		}
+
+		store.store(&*samples);
+	}
+
+	fn update(&self, kind: Kind, elapsed: u64, n: u64) {
+		macro_rules! update_counters {
+			($counters: expr) => {
+				$counters.0 = $counters.0.saturating_add(elapsed);
+				$counters.1 = $counters.1.saturating_add(n);
+			}
+		};
+
+		{
+			let set = self.active_period.read();
+			if let Some(counters) = set.get(&kind) {
+				let mut counters = counters.lock();
+				update_counters!(counters);
+				return;
+			}
+		}
+
+		let mut set = self.active_period.write();
+		let counters = set
+			.entry(kind)
+			.or_insert_with(|| Mutex::new((0, 0)));
+
+		update_counters!(counters.get_mut());
+	}
+}
+
+/// A timer for a single request.
+/// On drop, this will update the distribution.
+pub struct LoadTimer<'a> {
+	start: u64,
+	n: u64,
+	dist: &'a LoadDistribution,
+	kind: Kind,
+}
+
+impl<'a> Drop for LoadTimer<'a> {
+	fn drop(&mut self) {
+		let elapsed = time::precise_time_ns() - self.start;
+		self.dist.update(self.kind, elapsed, self.n);
+	}
+}
+
+/// A store which writes directly to a file.
+pub struct FileStore(pub PathBuf);
+
+impl SampleStore for FileStore {
+	fn load(&self) -> HashMap<Kind, VecDeque<u64>> {
+		File::open(&self.0)
+			.map_err(|e| Box::new(bincode::ErrorKind::IoError(e)))
+			.and_then(|mut file| bincode::deserialize_from(&mut file, bincode::Infinite))
+			.unwrap_or_else(|_| HashMap::new())
+	}
+
+	fn store(&self, samples: &HashMap<Kind, VecDeque<u64>>) {
+		let res = File::create(&self.0)
+			.map_err(|e| Box::new(bincode::ErrorKind::IoError(e)))
+			.and_then(|mut file| bincode::serialize_into(&mut file, samples, bincode::Infinite));
+
+		if let Err(e) = res {
+			warn!(target: "pip", "Error writing light request timing samples to file: {}", e);
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use request::Kind;
+
+	#[test]
+	fn hardcoded_before_data() {
+		let dist = LoadDistribution::load(&NullStore);
+		assert_eq!(dist.expected_time_ns(Kind::Headers), hardcoded_serve_time(Kind::Headers));
+
+		dist.update(Kind::Headers, 100_000, 100);
+		dist.end_period(&NullStore);
+
+		assert_eq!(dist.expected_time_ns(Kind::Headers), 1000);
+	}
+
+	#[test]
+	fn moving_average() {
+		let dist = LoadDistribution::load(&NullStore);
+
+		let mut sum = 0;
+
+		for (i, x) in (0..10).map(|x| x * 10_000).enumerate() {
+			dist.update(Kind::Headers, x, 1);
+			dist.end_period(&NullStore);
+
+			sum += x;
+			if i == 0 { continue }
+
+			let moving_average = dist.expected_time_ns(Kind::Headers);
+
+			// should be weighted below the maximum entry.
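			// Illustrative note (not from the patch): with alpha = 1/len, the fold in
			// `expected_time_ns` gives the oldest sample weight (1 - alpha)^(len - 1) and every
			// later sample at most alpha = 1/len. For a strictly increasing series this
			// over-weights the small early values, so the EMA sits below both the newest entry
			// and the arithmetic mean; with exactly two samples both weights are 1/2, which is
			// why the i == 1 case below asserts equality.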
+ let arith_average = (sum as f64 / (i + 1) as f64) as u64; + assert!(moving_average < x); + + // when there are only 2 entries, they should be equal due to choice of + // ALPHA = 1/N. + // otherwise, the weight should be below the arithmetic mean because the much + // smaller previous values are discounted less. + if i == 1 { + assert_eq!(moving_average, arith_average); + } else { + assert!(moving_average < arith_average) + } + } + } + + #[test] + fn file_store() { + let path = ::devtools::RandomTempPath::new(); + let store = FileStore(path.as_path().clone()); + + let mut samples = store.load(); + assert!(samples.is_empty()); + samples.insert(Kind::Headers, vec![5, 2, 7, 2, 2, 4].into()); + samples.insert(Kind::Execution, vec![1, 1, 100, 250].into()); + + store.store(&samples); + + let dup = store.load(); + + assert_eq!(samples, dup); + } +} diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 798f68fe5..6ab5903df 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -38,11 +38,13 @@ use request::{Request, NetworkRequests as Requests, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; use self::error::Punishment; +use self::load_timer::{LoadDistribution, NullStore}; use self::request_set::RequestSet; use self::id_guard::IdGuard; mod context; mod error; +mod load_timer; mod status; mod request_set; @@ -51,8 +53,9 @@ mod tests; pub mod request_credits; -pub use self::error::Error; pub use self::context::{BasicContext, EventContext, IoContext}; +pub use self::error::Error; +pub use self::load_timer::{SampleStore, FileStore}; pub use self::status::{Status, Capabilities, Announcement}; const TIMEOUT: TimerToken = 0; @@ -64,6 +67,9 @@ const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000; const PROPAGATE_TIMEOUT: TimerToken = 2; const PROPAGATE_TIMEOUT_INTERVAL_MS: u64 = 5000; +const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3; +const RECALCULATE_COSTS_INTERVAL_MS: u64 = 60 * 60 * 1000; + // minimum interval between updates. const UPDATE_INTERVAL_MS: i64 = 5000; @@ -88,13 +94,18 @@ mod packet { pub const REQUEST: u8 = 0x02; pub const RESPONSE: u8 = 0x03; + // request credits update and acknowledgement. + pub const UPDATE_CREDITS: u8 = 0x04; + pub const ACKNOWLEDGE_UPDATE: u8 = 0x05; + // relay transactions to peers. - pub const SEND_TRANSACTIONS: u8 = 0x04; + pub const SEND_TRANSACTIONS: u8 = 0x06; } // timeouts for different kinds of requests. all values are in milliseconds. mod timeout { pub const HANDSHAKE: i64 = 2500; + pub const ACKNOWLEDGE_UPDATE: i64 = 5000; pub const BASE: i64 = 1500; // base timeout for packet. // timeouts per request within packet. @@ -141,6 +152,9 @@ pub struct Peer { pending_requests: RequestSet, failed_requests: Vec, propagated_transactions: HashSet, + skip_update: bool, + local_flow: Arc, + awaiting_acknowledge: Option<(SteadyTime, Arc)>, } /// A light protocol event handler. @@ -176,14 +190,36 @@ pub trait Handler: Send + Sync { fn on_abort(&self) { } } -/// Protocol parameters. +/// Configuration. +pub struct Config { + /// How many stored seconds of credits peers should be able to accumulate. + pub max_stored_seconds: u64, + /// How much of the total load capacity each peer should be allowed to take. + pub load_share: f64, +} + +impl Default for Config { + fn default() -> Self { + const LOAD_SHARE: f64 = 1.0 / 25.0; + const MAX_ACCUMULATED: u64 = 60 * 5; // only charge for 5 minutes. 
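		// Illustrative note (not from the patch): with these defaults a peer is budgeted 1/25th
		// of the node's measured serving capacity and may bank at most five minutes' worth of
		// unused credits; the concrete credit limit becomes recharge_rate * max_stored_seconds
		// once `FlowParams::from_request_times` is applied to these values.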
+
+		Config {
+			max_stored_seconds: MAX_ACCUMULATED,
+			load_share: LOAD_SHARE,
+		}
+	}
+}
+
+/// Protocol initialization parameters.
 pub struct Params {
 	/// Network id.
 	pub network_id: u64,
-	/// Request credits parameters.
-	pub flow_params: FlowParams,
+	/// Config.
+	pub config: Config,
 	/// Initial capabilities.
 	pub capabilities: Capabilities,
+	/// The sample store (`None` if data shouldn't persist between runs).
+	pub sample_store: Option<Box<SampleStore>>,
 }
 
 /// Type alias for convenience.
@@ -249,14 +285,17 @@ mod id_guard {
 // on the peers, only one peer may be held at a time.
 pub struct LightProtocol {
 	provider: Arc<Provider>,
+	config: Config,
 	genesis_hash: H256,
 	network_id: u64,
 	pending_peers: RwLock<HashMap<PeerId, PendingPeer>>,
 	peers: RwLock<PeerMap>,
 	capabilities: RwLock<Capabilities>,
-	flow_params: FlowParams, // assumed static and same for every peer.
+	flow_params: RwLock<Arc<FlowParams>>,
 	handlers: Vec<Arc<Handler>>,
 	req_id: AtomicUsize,
+	sample_store: Box<SampleStore>,
+	load_distribution: LoadDistribution,
 }
 
 impl LightProtocol {
@@ -265,16 +304,27 @@ impl LightProtocol {
 		debug!(target: "pip", "Initializing light protocol handler");
 		let genesis_hash = provider.chain_info().genesis_hash;
 
+		let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore));
+		let load_distribution = LoadDistribution::load(&*sample_store);
+		let flow_params = FlowParams::from_request_times(
+			|kind| load_distribution.expected_time_ns(kind),
+			params.config.load_share,
+			params.config.max_stored_seconds,
+		);
+
 		LightProtocol {
 			provider: provider,
+			config: params.config,
 			genesis_hash: genesis_hash,
 			network_id: params.network_id,
 			pending_peers: RwLock::new(HashMap::new()),
 			peers: RwLock::new(HashMap::new()),
 			capabilities: RwLock::new(params.capabilities),
-			flow_params: params.flow_params,
+			flow_params: RwLock::new(Arc::new(flow_params)),
 			handlers: Vec::new(),
 			req_id: AtomicUsize::new(0),
+			sample_store: sample_store,
+			load_distribution: load_distribution,
 		}
 	}
 
@@ -422,8 +472,9 @@ impl LightProtocol {
 		let res = match peers.get(peer) {
 			Some(peer_info) => {
 				let mut peer_info = peer_info.lock();
+				let peer_info: &mut Peer = &mut *peer_info;
 				let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now());
-				let cumulative_cost = peer_info.pending_requests.cumulative_cost();
+				let last_batched = peer_info.pending_requests.is_empty();
 				let flow_info = peer_info.remote_flow.as_mut();
 
 				match (req_info, flow_info) {
@@ -431,11 +482,14 @@ impl LightProtocol {
 					let &mut (ref mut c, ref mut flow) = flow_info;
 
 					// only update if the cumulative cost of the request set is zero.
-					if cumulative_cost == 0.into() {
+					// and this response wasn't from before request costs were updated.
+ if !peer_info.skip_update && last_batched { let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); } + if last_batched { peer_info.skip_update = false } + Ok(()) } (None, _) => Err(Error::UnsolicitedResponse), @@ -464,6 +518,9 @@ impl LightProtocol { packet::REQUEST => self.request(peer, io, rlp), packet::RESPONSE => self.response(peer, io, rlp), + packet::UPDATE_CREDITS => self.update_credits(peer, io, rlp), + packet::ACKNOWLEDGE_UPDATE => self.acknowledge_update(peer, io, rlp), + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), other => { @@ -497,13 +554,22 @@ impl LightProtocol { } } - // request timeouts + // request and update ack timeouts + let ack_duration = Duration::milliseconds(timeout::ACKNOWLEDGE_UPDATE); { for (peer_id, peer) in self.peers.read().iter() { - if peer.lock().pending_requests.check_timeout(now) { + let peer = peer.lock(); + if peer.pending_requests.check_timeout(now) { debug!(target: "pip", "Peer {} request timeout", peer_id); io.disconnect_peer(*peer_id); } + + if let Some((ref start, _)) = peer.awaiting_acknowledge { + if *start + ack_duration <= now { + debug!(target: "pip", "Peer {} update acknowledgement timeout", peer_id); + io.disconnect_peer(*peer_id); + } + } } } } @@ -574,7 +640,8 @@ impl LightProtocol { }; let capabilities = self.capabilities.read().clone(); - let status_packet = status::write_handshake(&status, &capabilities, Some(&self.flow_params)); + let local_flow = self.flow_params.read(); + let status_packet = status::write_handshake(&status, &capabilities, Some(&**local_flow)); self.pending_peers.write().insert(*peer, PendingPeer { sent_head: chain_info.best_block_hash, @@ -628,6 +695,35 @@ impl LightProtocol { }) } } + + fn begin_new_cost_period(&self, io: &IoContext) { + self.load_distribution.end_period(&*self.sample_store); + + let new_params = Arc::new(FlowParams::from_request_times( + |kind| self.load_distribution.expected_time_ns(kind), + self.config.load_share, + self.config.max_stored_seconds, + )); + *self.flow_params.write() = new_params.clone(); + + let peers = self.peers.read(); + let now = SteadyTime::now(); + + let packet_body = { + let mut stream = RlpStream::new_list(3); + stream.append(new_params.limit()) + .append(new_params.recharge_rate()) + .append(new_params.cost_table()); + stream.out() + }; + + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); + + io.send(*peer_id, packet::UPDATE_CREDITS, packet_body.clone()); + peer_info.awaiting_acknowledge = Some((now.clone(), new_params.clone())); + } + } } impl LightProtocol { @@ -653,9 +749,10 @@ impl LightProtocol { } let remote_flow = flow_params.map(|params| (params.create_credits(), params)); + let local_flow = self.flow_params.read().clone(); self.peers.write().insert(*peer, Mutex::new(Peer { - local_credits: self.flow_params.create_credits(), + local_credits: local_flow.create_credits(), status: status.clone(), capabilities: capabilities.clone(), remote_flow: remote_flow, @@ -664,6 +761,9 @@ impl LightProtocol { pending_requests: RequestSet::default(), failed_requests: Vec::new(), propagated_transactions: HashSet::new(), + skip_update: false, + local_flow: local_flow, + awaiting_acknowledge: None, })); for handler in &self.handlers { @@ -739,6 +839,7 @@ impl LightProtocol { } }; let mut peer = peer.lock(); + let peer: &mut Peer = &mut *peer; let req_id: u64 = raw.val_at(0)?; let mut request_builder = RequestBuilder::default(); @@ -746,12 +847,13 @@ impl LightProtocol { 
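	// Illustrative note (not from the patch): the serving-side accounting that follows first
	// recharges the peer's local credit balance for elapsed time, then charges the base packet
	// cost, and then charges each sub-request at the current cost-table rate before queueing it;
	// a failed deduction aborts the whole packet, so a peer cannot exceed its credit limit by
	// batching requests.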
trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); // deserialize requests, check costs and request validity. - self.flow_params.recharge(&mut peer.local_credits); + peer.local_flow.recharge(&mut peer.local_credits); - peer.local_credits.deduct_cost(self.flow_params.base_cost())?; + peer.local_credits.deduct_cost(peer.local_flow.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; - peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; + let cost = peer.local_flow.compute_cost(&request); + peer.local_credits.deduct_cost(cost)?; request_builder.push(request).map_err(|_| Error::BadBackReference)?; } @@ -761,6 +863,7 @@ impl LightProtocol { // respond to all requests until one fails. let responses = requests.respond_to_all(|complete_req| { + let _timer = self.load_distribution.begin_timer(&complete_req); match complete_req { CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), @@ -804,6 +907,60 @@ impl LightProtocol { Ok(()) } + // handle an update of request credits parameters. + fn update_credits(&self, peer_id: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let peers = self.peers.read(); + + let peer = peers.get(peer_id).ok_or(Error::UnknownPeer)?; + let mut peer = peer.lock(); + + trace!(target: "pip", "Received an update to request credit params from peer {}", peer_id); + + { + let &mut (ref mut credits, ref mut old_params) = peer.remote_flow.as_mut().ok_or(Error::NotServer)?; + old_params.recharge(credits); + + let new_params = FlowParams::new( + raw.val_at(0)?, // limit + raw.val_at(2)?, // cost table + raw.val_at(1)?, // recharge. + ); + + // preserve ratio of current : limit when updating params. + credits.maintain_ratio(*old_params.limit(), *new_params.limit()); + *old_params = new_params; + } + + // set flag to true when there is an in-flight request + // corresponding to old flow params. + if !peer.pending_requests.is_empty() { + peer.skip_update = true; + } + + // let peer know we've acknowledged the update. + io.respond(packet::ACKNOWLEDGE_UPDATE, Vec::new()); + Ok(()) + } + + // handle an acknowledgement of request credits update. + fn acknowledge_update(&self, peer_id: &PeerId, _io: &IoContext, _raw: UntrustedRlp) -> Result<(), Error> { + let peers = self.peers.read(); + let peer = peers.get(peer_id).ok_or(Error::UnknownPeer)?; + let mut peer = peer.lock(); + + trace!(target: "pip", "Received an acknowledgement for new request credit params from peer {}", peer_id); + + let (_, new_params) = match peer.awaiting_acknowledge.take() { + Some(x) => x, + None => return Err(Error::UnsolicitedResponse), + }; + + let old_limit = *peer.local_flow.limit(); + peer.local_credits.maintain_ratio(old_limit, *new_params.limit()); + peer.local_flow = new_params; + Ok(()) + } + // Receive a set of transactions to relay. 
fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_TRANSACTIONS: usize = 256; @@ -850,6 +1007,8 @@ impl NetworkProtocolHandler for LightProtocol { .expect("Error registering sync timer."); io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL_MS) .expect("Error registering sync timer."); + io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL_MS) + .expect("Error registering request timer interval token."); } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -869,6 +1028,7 @@ impl NetworkProtocolHandler for LightProtocol { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), PROPAGATE_TIMEOUT => self.propagate_transactions(io), + RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(io), _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 0c94d7bc9..330576f9c 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -56,6 +56,11 @@ impl Credits { self.recharge_point = SteadyTime::now(); } + /// Maintain ratio to current limit against an old limit. + pub fn maintain_ratio(&mut self, old_limit: U256, new_limit: U256) { + self.estimate = (new_limit * self.estimate) / old_limit; + } + /// Attempt to apply the given cost to the amount of credits. /// /// If successful, the cost will be deducted successfully. @@ -188,6 +193,53 @@ impl FlowParams { } } + /// Create new flow parameters from , + /// proportion of total capacity which should be given to a peer, + /// and number of seconds of stored capacity a peer can accumulate. + pub fn from_request_times u64>( + request_time_ns: F, + load_share: f64, + max_stored_seconds: u64 + ) -> Self { + use request::Kind; + + let load_share = load_share.abs(); + + let recharge: u64 = 100_000_000; + let max = recharge.saturating_mul(max_stored_seconds); + + let cost_for_kind = |kind| { + // how many requests we can handle per second + let ns = request_time_ns(kind); + let second_duration = 1_000_000_000f64 / ns as f64; + + // scale by share of the load given to this peer. + let serve_per_second = second_duration * load_share; + let serve_per_second = serve_per_second.max(1.0 / 10_000.0); + + // as a percentage of the recharge per second. + U256::from((recharge as f64 / serve_per_second) as u64) + }; + + let costs = CostTable { + base: 0.into(), + headers: cost_for_kind(Kind::Headers), + body: cost_for_kind(Kind::Body), + receipts: cost_for_kind(Kind::Receipts), + account: cost_for_kind(Kind::Account), + storage: cost_for_kind(Kind::Storage), + code: cost_for_kind(Kind::Code), + header_proof: cost_for_kind(Kind::HeaderProof), + transaction_proof: cost_for_kind(Kind::Execution), + }; + + FlowParams { + costs: costs, + limit: max.into(), + recharge: recharge.into(), + } + } + /// Create effectively infinite flow params. 
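	// Illustrative note (not from the patch), a worked example of the cost derivation above:
	// with the recharge rate fixed at 100_000_000 credits/sec, a request kind that takes
	// 10_000 ns to serve allows 100_000 requests/sec in total; at load_share = 0.05 one peer is
	// allotted 5_000 of those, so each such request costs 100_000_000 / 5_000 = 20_000 credits,
	// and with max_stored_seconds = 60 the credit limit is 6_000_000_000. Doubling the load
	// share (or halving the serve time) halves the per-request cost, which is exactly what the
	// `scale_by_load_share_and_time` test below asserts.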
pub fn free() -> Self { let free_cost: U256 = 0.into(); @@ -316,4 +368,28 @@ mod tests { assert_eq!(credits.estimate, 100.into()); } + + #[test] + fn scale_by_load_share_and_time() { + let flow_params = FlowParams::from_request_times( + |_| 10_000, + 0.05, + 60, + ); + + let flow_params2 = FlowParams::from_request_times( + |_| 10_000, + 0.1, + 60, + ); + + let flow_params3 = FlowParams::from_request_times( + |_| 5_000, + 0.05, + 60, + ); + + assert_eq!(flow_params2.costs, flow_params3.costs); + assert_eq!(flow_params.costs.headers, flow_params2.costs.headers * 2.into()); + } } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index c5608050f..1277c2615 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -120,6 +120,8 @@ impl RequestSet { pub fn is_empty(&self) -> bool { self.len() == 0 } /// The cumulative cost of all requests in the set. + // this may be useful later for load balancing. + #[allow(dead_code)] pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 94788a727..a4c9754ba 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -24,9 +24,8 @@ use ethcore::transaction::{Action, PendingTransaction}; use ethcore::encoded; use network::{PeerId, NodeId}; -use net::request_credits::FlowParams; use net::context::IoContext; -use net::status::{Capabilities, Status, write_handshake}; +use net::status::{Capabilities, Status}; use net::{LightProtocol, Params, packet, Peer}; use provider::Provider; use request; @@ -162,10 +161,6 @@ impl Provider for TestProvider { } } -fn make_flow_params() -> FlowParams { - FlowParams::new(5_000_000.into(), Default::default(), 100_000.into()) -} - fn capabilities() -> Capabilities { Capabilities { serve_headers: true, @@ -175,16 +170,22 @@ fn capabilities() -> Capabilities { } } +fn write_handshake(status: &Status, capabilities: &Capabilities, proto: &LightProtocol) -> Vec { + let flow_params = proto.flow_params.read().clone(); + ::net::status::write_handshake(status, capabilities, Some(&*flow_params)) +} + // helper for setting up the protocol handler and provider. 
-fn setup(flow_params: FlowParams, capabilities: Capabilities) -> (Arc, LightProtocol) { +fn setup(capabilities: Capabilities) -> (Arc, LightProtocol) { let provider = Arc::new(TestProviderInner { client: TestBlockChainClient::new(), }); let proto = LightProtocol::new(Arc::new(TestProvider(provider.clone())), Params { network_id: 2, - flow_params: flow_params, + config: Default::default(), capabilities: capabilities, + sample_store: None, }); (provider, proto) @@ -204,14 +205,13 @@ fn status(chain_info: BlockChainInfo) -> Status { #[test] fn handshake_expected() { - let flow_params = make_flow_params(); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); let status = status(provider.client.chain_info()); - let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); } @@ -219,42 +219,40 @@ fn handshake_expected() { #[test] #[should_panic] fn genesis_mismatch() { - let flow_params = make_flow_params(); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); let mut status = status(provider.client.chain_info()); status.genesis_hash = H256::default(); - let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); } #[test] fn credit_overflow() { - let flow_params = make_flow_params(); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); let status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); } { - let my_status = write_handshake(&status, &capabilities, Some(&flow_params)); + let my_status = write_handshake(&status, &capabilities, &proto); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - // 1000 requests is far too many for the default flow params. + // 1 billion requests is far too many for the default flow params. 
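	// Illustrative note (not from the patch): with the default Config (load_share = 1/25,
	// max_stored_seconds = 300) and the hardcoded 500_000 ns estimate for headers, each header
	// costs 100_000_000 / ((1_000_000_000 / 500_000) * 0.04) = 1_250_000 credits against a limit
	// of 100_000_000 * 300 = 30_000_000_000 (roughly 24_000 headers), so a single request for
	// 1_000_000_000 headers trips the credit check immediately.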
let requests = encode_single(Request::Headers(IncompleteHeadersRequest { start: HashOrNumber::Number(1).into(), - max: 1000, + max: 1_000_000_000, skip: 0, reverse: false, })); @@ -268,20 +266,20 @@ fn credit_overflow() { #[test] fn get_block_headers() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let my_status = write_handshake(&cur_status, &capabilities, &proto); provider.client.add_blocks(100, EachBlockWith::Nothing); let cur_status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } @@ -320,20 +318,20 @@ fn get_block_headers() { #[test] fn get_block_bodies() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let my_status = write_handshake(&cur_status, &capabilities, &proto); provider.client.add_blocks(100, EachBlockWith::Nothing); let cur_status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } @@ -368,20 +366,20 @@ fn get_block_bodies() { #[test] fn get_block_receipts() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); - let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let my_status = write_handshake(&cur_status, &capabilities, &proto); provider.client.add_blocks(1000, EachBlockWith::Nothing); let cur_status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } @@ -423,16 +421,17 @@ fn get_block_receipts() { #[test] fn get_state_proofs() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = 
proto.flow_params.read().clone(); + let provider = TestProvider(provider); let cur_status = status(provider.0.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); } @@ -481,15 +480,15 @@ fn get_state_proofs() { #[test] fn get_contract_code() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); } @@ -524,15 +523,15 @@ fn get_contract_code() { #[test] fn proof_of_execution() { - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); { - let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + let packet_body = write_handshake(&cur_status, &capabilities, &proto); proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); } @@ -553,7 +552,11 @@ fn proof_of_execution() { let request_body = make_packet(req_id, &requests); let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let limit = *flow_params.limit(); + let cost = flow_params.compute_cost_multi(requests.requests()); + + println!("limit = {}, cost = {}", limit, cost); + let new_creds = limit - cost; let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).begin_list(0); @@ -581,10 +584,10 @@ fn id_guard() { use super::request_set::RequestSet; use super::ReqId; - let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); let capabilities = capabilities(); - let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); let req_id_1 = ReqId(5143); let req_id_2 = ReqId(1111); @@ -607,12 +610,15 @@ fn id_guard() { local_credits: flow_params.create_credits(), status: status(provider.client.chain_info()), capabilities: capabilities.clone(), - remote_flow: Some((flow_params.create_credits(), flow_params)), + remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())), sent_head: provider.client.chain_info().best_block_hash, last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, failed_requests: Vec::new(), propagated_transactions: Default::default(), + skip_update: false, + local_flow: flow_params, + awaiting_acknowledge: None, })); // first, malformed responses. 
diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 3dd2db629..3953aa88b 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -256,6 +256,22 @@ pub enum CompleteRequest { Execution(CompleteExecutionRequest), } +impl CompleteRequest { + /// Inspect the kind of this response. + pub fn kind(&self) -> Kind { + match *self { + CompleteRequest::Headers(_) => Kind::Headers, + CompleteRequest::HeaderProof(_) => Kind::HeaderProof, + CompleteRequest::Receipts(_) => Kind::Receipts, + CompleteRequest::Body(_) => Kind::Body, + CompleteRequest::Account(_) => Kind::Account, + CompleteRequest::Storage(_) => Kind::Storage, + CompleteRequest::Code(_) => Kind::Code, + CompleteRequest::Execution(_) => Kind::Execution, + } + } +} + impl Request { /// Get the request kind. pub fn kind(&self) -> Kind { @@ -396,7 +412,7 @@ impl CheckedRequest for Request { /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] pub enum Kind { /// A request for headers. Headers = 0, diff --git a/sync/src/api.rs b/sync/src/api.rs index 3e3234d84..edf83ee17 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -195,21 +195,33 @@ pub struct EthSync { impl EthSync { /// Creates and register protocol with the network service pub fn new(params: Params) -> Result, NetworkError> { + const MAX_LIGHTSERV_LOAD: f64 = 0.5; + let pruning_info = params.chain.pruning_info(); let light_proto = match params.config.serve_light { false => None, true => Some({ - let light_params = LightParams { + let sample_store = params.network_config.net_config_path + .clone() + .map(::std::path::PathBuf::from) + .map(|mut p| { p.push("request_timings"); light_net::FileStore(p) }) + .map(|store| Box::new(store) as Box<_>); + + let mut light_params = LightParams { network_id: params.config.network_id, - flow_params: Default::default(), + config: Default::default(), capabilities: Capabilities { serve_headers: true, serve_chain_since: Some(pruning_info.earliest_chain), serve_state_since: Some(pruning_info.earliest_state), tx_relay: true, }, + sample_store: sample_store, }; + let max_peers = ::std::cmp::min(params.network_config.max_peers, 1); + light_params.config.load_share = MAX_LIGHTSERV_LOAD / max_peers as f64; + let mut light_proto = LightProtocol::new(params.provider, light_params); light_proto.add_handler(Arc::new(TxRelay(params.chain.clone()))); @@ -686,13 +698,14 @@ impl LightSync { let (sync, light_proto) = { let light_params = LightParams { network_id: params.network_id, - flow_params: Default::default(), // or `None`? 
+ config: Default::default(), capabilities: Capabilities { serve_headers: false, serve_chain_since: None, serve_state_since: None, tx_relay: false, }, + sample_store: None, }; let mut light_proto = LightProtocol::new(params.client.clone(), light_params); diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 1da4d1659..525216a7e 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -27,7 +27,6 @@ use ethcore::spec::Spec; use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; -use light::net::request_credits::FlowParams; use light::provider::LightProvider; use network::{NodeId, PeerId}; use util::RwLock; @@ -88,13 +87,14 @@ impl Peer { pub fn new_full(chain: Arc) -> Self { let params = LightParams { network_id: NETWORK_ID, - flow_params: FlowParams::free(), + config: Default::default(), capabilities: Capabilities { serve_headers: true, serve_chain_since: None, serve_state_since: None, tx_relay: true, }, + sample_store: None, }; let proto = LightProtocol::new(chain.clone(), params); @@ -110,13 +110,14 @@ impl Peer { let sync = Arc::new(LightSync::new(chain.clone()).unwrap()); let params = LightParams { network_id: NETWORK_ID, - flow_params: FlowParams::default(), + config: Default::default(), capabilities: Capabilities { serve_headers: false, serve_chain_since: None, serve_state_since: None, tx_relay: false, }, + sample_store: None, }; let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); From 386cdb830dbe0d98e0a1c0db8304a0c18ad8d182 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 23 May 2017 06:39:25 -0400 Subject: [PATCH 19/29] Back-references for the on-demand service (#5573) * header back-references for on demand * initial back-reference implementation for on demand requests * answer requests from cache * answer requests from cache, add tests * strongly typed responses for vectors of homogeneous requests * fix fallout in RPC without optimizing --- ethcore/light/src/on_demand/mod.rs | 371 ++++++++----------- ethcore/light/src/on_demand/request.rs | 391 ++++++++++++++++----- ethcore/light/src/on_demand/tests.rs | 120 ++++++- ethcore/light/src/types/request/builder.rs | 53 ++- ethcore/light/src/types/request/mod.rs | 29 +- parity/light_helpers/queue_cull.rs | 36 +- rpc/src/v1/helpers/dispatch.rs | 35 +- rpc/src/v1/helpers/light_fetch.rs | 144 +++++--- rpc/src/v1/impls/light/eth.rs | 25 +- 9 files changed, 778 insertions(+), 426 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index e61c126d6..435c72cf7 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -22,24 +22,19 @@ use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; -use ethcore::basic_account::BasicAccount; -use ethcore::encoded; -use ethcore::receipt::Receipt; use ethcore::executed::{Executed, ExecutionError}; -use futures::{future, Async, Poll, Future, BoxFuture}; +use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver, Canceled}; use network::PeerId; -use rlp::RlpStream; -use util::{Bytes, RwLock, Mutex, U256, H256}; -use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; +use util::{RwLock, Mutex}; use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; use request::{self as basic_request, 
Request as NetworkRequest}; use self::request::CheckedRequest; -pub use self::request::{Request, Response}; +pub use self::request::{Request, Response, HeaderRef}; #[cfg(test)] mod tests; @@ -75,6 +70,98 @@ struct Pending { sender: oneshot::Sender>, } +impl Pending { + // answer as many of the given requests from the supplied cache as possible. + // TODO: support re-shuffling. + fn answer_from_cache(&mut self, cache: &Mutex) { + while !self.requests.is_complete() { + let idx = self.requests.num_answered(); + match self.requests[idx].respond_local(cache) { + Some(response) => { + self.requests.supply_response_unchecked(&response); + self.update_header_refs(idx, &response); + self.responses.push(response); + } + None => break, + } + } + } + + // update header refs if the given response contains a header future requests require for + // verification. + // `idx` is the index of the request the response corresponds to. + fn update_header_refs(&mut self, idx: usize, response: &Response) { + match *response { + Response::HeaderByHash(ref hdr) => { + // fill the header for all requests waiting on this one. + // TODO: could be faster if we stored a map usize => Vec + // but typical use just has one header request that others + // depend on. + for r in self.requests.iter_mut().skip(idx + 1) { + if r.needs_header().map_or(false, |(i, _)| i == idx) { + r.provide_header(hdr.clone()) + } + } + } + _ => {}, // no other responses produce headers. + } + } + + // supply a response. + fn supply_response(&mut self, cache: &Mutex, response: &basic_request::Response) + -> Result<(), basic_request::ResponseError> + { + match self.requests.supply_response(&cache, response) { + Ok(response) => { + let idx = self.responses.len(); + self.update_header_refs(idx, &response); + self.responses.push(response); + Ok(()) + } + Err(e) => Err(e), + } + } + + // if the requests are complete, send the result and consume self. + fn try_complete(self) -> Option { + if self.requests.is_complete() { + let _ = self.sender.send(self.responses); + None + } else { + Some(self) + } + } + + fn fill_unanswered(&mut self) { + self.requests.fill_unanswered(); + } + + // update the cached network requests. + fn update_net_requests(&mut self) { + use request::IncompleteRequest; + + let mut builder = basic_request::RequestBuilder::default(); + let num_answered = self.requests.num_answered(); + let mut mapping = move |idx| idx - num_answered; + + for request in self.requests.iter().skip(num_answered) { + let mut net_req = request.clone().into_net_request(); + + // all back-references with request index less than `num_answered` have + // been filled by now. all remaining requests point to nothing earlier + // than the next unanswered request. + net_req.adjust_refs(&mut mapping); + builder.push(net_req) + .expect("all back-references to answered requests have been filled; qed"); + } + + // update pending fields. + let capabilities = guess_capabilities(&self.requests[num_answered..]); + self.net_requests = builder.build(); + self.required_capabilities = capabilities; + } +} + // helper to guess capabilities required for a given batch of network requests. 
fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { let mut caps = Capabilities { @@ -97,16 +184,21 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { caps.serve_headers = true, CheckedRequest::HeaderByHash(_, _) => caps.serve_headers = true, - CheckedRequest::Body(ref req, _) => - update_since(&mut caps.serve_chain_since, req.header.number()), - CheckedRequest::Receipts(ref req, _) => - update_since(&mut caps.serve_chain_since, req.0.number()), - CheckedRequest::Account(ref req, _) => - update_since(&mut caps.serve_state_since, req.header.number()), - CheckedRequest::Code(ref req, _) => - update_since(&mut caps.serve_state_since, req.block_id.1), - CheckedRequest::Execution(ref req, _) => - update_since(&mut caps.serve_state_since, req.header.number()), + CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { + update_since(&mut caps.serve_chain_since, hdr.number()); + }, + CheckedRequest::Receipts(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { + update_since(&mut caps.serve_chain_since, hdr.number()); + }, + CheckedRequest::Account(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + }, + CheckedRequest::Code(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + }, + CheckedRequest::Execution(ref req, _) => if let Ok(ref hdr) = req.header.as_ref() { + update_since(&mut caps.serve_state_since, hdr.number()); + }, } } @@ -163,158 +255,6 @@ impl OnDemand { me } - /// Request a header's hash by block number and CHT root hash. - /// Returns the hash. - pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture { - let cached = { - let mut cache = self.cache.lock(); - cache.block_hash(&req.num()) - }; - - match cached { - Some(hash) => future::ok(hash).boxed(), - None => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .map(|(h, _)| h) - .boxed() - }, - } - } - - /// Request a canonical block's chain score. - /// Returns the chain score. - pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture { - let cached = { - let mut cache = self.cache.lock(); - cache.block_hash(&req.num()).and_then(|hash| cache.chain_score(&hash)) - }; - - match cached { - Some(score) => future::ok(score).boxed(), - None => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .map(|(_, s)| s) - .boxed() - }, - } - } - - /// Request a canonical block's hash and chain score by number. - /// Returns the hash and chain score. - pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<(H256, U256), Canceled> { - let cached = { - let mut cache = self.cache.lock(); - let hash = cache.block_hash(&req.num()); - ( - hash.clone(), - hash.and_then(|hash| cache.chain_score(&hash)), - ) - }; - - match cached { - (Some(hash), Some(score)) => future::ok((hash, score)).boxed(), - _ => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - }, - } - } - - /// Request a header by hash. This is less accurate than by-number because we don't know - /// where in the chain this header lies, and therefore can't find a peer who is supposed to have - /// it as easily. 
- pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> BoxFuture { - match { self.cache.lock().block_header(&req.0) } { - Some(hdr) => future::ok(hdr).boxed(), - None => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - }, - } - } - - /// Request a block, given its header. Block bodies are requestable by hash only, - /// and the header is required anyway to verify and complete the block body - /// -- this just doesn't obscure the network query. - pub fn block(&self, ctx: &BasicContext, req: request::Body) -> BoxFuture { - // fast path for empty body. - if req.header.transactions_root() == SHA3_NULL_RLP && req.header.uncles_hash() == SHA3_EMPTY_LIST_RLP { - let mut stream = RlpStream::new_list(3); - stream.append_raw(&req.header.into_inner(), 1); - stream.begin_list(0); - stream.begin_list(0); - - future::ok(encoded::Block::new(stream.out())).boxed() - } else { - match { self.cache.lock().block_body(&req.hash) } { - Some(body) => { - let mut stream = RlpStream::new_list(3); - let body = body.rlp(); - stream.append_raw(&req.header.into_inner(), 1); - stream.append_raw(&body.at(0).as_raw(), 1); - stream.append_raw(&body.at(1).as_raw(), 1); - - future::ok(encoded::Block::new(stream.out())).boxed() - } - None => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - } - } - } - } - - /// Request the receipts for a block. The header serves two purposes: - /// provide the block hash to fetch receipts for, and for verification of the receipts root. - pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> BoxFuture, Canceled> { - // fast path for empty receipts. - if req.0.receipts_root() == SHA3_NULL_RLP { - return future::ok(Vec::new()).boxed() - } - - match { self.cache.lock().block_receipts(&req.0.hash()) } { - Some(receipts) => future::ok(receipts).boxed(), - None => { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - }, - } - } - - /// Request an account by address and block header -- which gives a hash to query and a state root - /// to verify against. - /// `None` here means that no account by the queried key exists in the queried state. - pub fn account(&self, ctx: &BasicContext, req: request::Account) -> BoxFuture, Canceled> { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - } - - /// Request code by address, known code hash, and block header. - pub fn code(&self, ctx: &BasicContext, req: request::Code) -> BoxFuture { - // fast path for no code. - if req.code_hash == SHA3_EMPTY { - future::ok(Vec::new()).boxed() - } else { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - } - } - - /// Request proof-of-execution for a transaction. - pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> BoxFuture { - self.request(ctx, req) - .expect("request given fully fleshed out; qed") - .boxed() - } - /// Submit a vector of requests to be processed together. /// /// Fails if back-references are not coherent. 
@@ -332,15 +272,33 @@ impl OnDemand { let mut builder = basic_request::RequestBuilder::default(); let responses = Vec::with_capacity(requests.len()); - for request in requests { - builder.push(CheckedRequest::from(request))?; + + let mut header_producers = HashMap::new(); + for (i, request) in requests.into_iter().enumerate() { + let request = CheckedRequest::from(request); + + // ensure that all requests needing headers will get them. + if let Some((idx, field)) = request.needs_header() { + // a request chain with a header back-reference is valid only if it both + // points to a request that returns a header and has the same back-reference + // for the block hash. + match header_producers.get(&idx) { + Some(ref f) if &field == *f => {} + _ => return Err(basic_request::NoSuchOutput), + } + } + if let CheckedRequest::HeaderByHash(ref req, _) = request { + header_producers.insert(i, req.0.clone()); + } + + builder.push(request)?; } let requests = builder.build(); let net_requests = requests.clone().map_requests(|req| req.into_net_request()); let capabilities = guess_capabilities(requests.requests()); - self.pending.write().push(Pending { + self.submit_pending(ctx, Pending { requests: requests, net_requests: net_requests, required_capabilities: capabilities, @@ -348,8 +306,6 @@ impl OnDemand { sender: sender, }); - self.attempt_dispatch(ctx); - Ok(receiver) } @@ -430,6 +386,19 @@ impl OnDemand { }) .collect(); // `pending` now contains all requests we couldn't dispatch. } + + // submit a pending request set. attempts to answer from cache before + // going to the network. if complete, sends response and consumes the struct. + fn submit_pending(&self, ctx: &BasicContext, mut pending: Pending) { + // answer as many requests from cache as we can, and schedule for dispatch + // if incomplete. + pending.answer_from_cache(&*self.cache); + if let Some(mut pending) = pending.try_complete() { + pending.update_net_requests(); + self.pending.write().push(pending); + self.attempt_dispatch(ctx); + } + } } impl Handler for OnDemand { @@ -468,63 +437,27 @@ impl Handler for OnDemand { } fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { - use request::IncompleteRequest; - let mut pending = match self.in_transit.write().remove(&req_id) { Some(req) => req, None => return, }; // for each incoming response - // 1. ensure verification data filled. (still TODO since on_demand doesn't use back-references yet) + // 1. ensure verification data filled. // 2. pending.requests.supply_response // 3. if extracted on-demand response, keep it for later. for response in responses { - match pending.requests.supply_response(&*self.cache, response) { - Ok(response) => { - pending.responses.push(response) - } - Err(e) => { - let peer = ctx.peer(); - debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); - ctx.disable_peer(peer); + if let Err(e) = pending.supply_response(&*self.cache, response) { + let peer = ctx.peer(); + debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e); + ctx.disable_peer(peer); - break; - } + break; } } - pending.requests.fill_unanswered(); - if pending.requests.is_complete() { - let _ = pending.sender.send(pending.responses); - - return; - } - - - // update network requests (unless we're done, in which case fulfill the future.) 
- let mut builder = basic_request::RequestBuilder::default(); - let num_answered = pending.requests.num_answered(); - let mut mapping = move |idx| idx - num_answered; - - for request in pending.requests.requests().iter().skip(num_answered) { - let mut net_req = request.clone().into_net_request(); - - // all back-references with request index less than `num_answered` have - // been filled by now. all remaining requests point to nothing earlier - // than the next unanswered request. - net_req.adjust_refs(&mut mapping); - builder.push(net_req) - .expect("all back-references to answered requests have been filled; qed"); - } - - // update pending fields and re-queue. - let capabilities = guess_capabilities(&pending.requests.requests()[num_answered..]); - pending.net_requests = builder.build(); - pending.required_capabilities = capabilities; - - self.pending.write().push(pending); - self.attempt_dispatch(ctx.as_basic()); + pending.fill_unanswered(); + self.submit_pending(ctx.as_basic(), pending); } fn tick(&self, ctx: &BasicContext) { diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index c9a5c4d9b..3ad9a28d5 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -26,12 +26,12 @@ use ethcore::receipt::Receipt; use ethcore::state::{self, ProvedExecution}; use ethcore::transaction::SignedTransaction; -use request::{self as net_request, IncompleteRequest, Output, OutputKind}; +use request::{self as net_request, IncompleteRequest, CompleteRequest, Output, OutputKind, Field}; use rlp::{RlpStream, UntrustedRlp}; use util::{Address, Bytes, DBValue, HashDB, Mutex, H256, U256}; use util::memorydb::MemoryDB; -use util::sha3::Hashable; +use util::sha3::{Hashable, SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use util::trie::{Trie, TrieDB, TrieError}; const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed"; @@ -87,6 +87,18 @@ pub trait RequestAdapter { fn extract_from(Vec) -> Self::Out; } +impl RequestAdapter for Vec { + type Out = Vec; + + fn make_requests(self) -> Vec { + self.into_iter().map(RequestArg::make).collect() + } + + fn extract_from(r: Vec) -> Self::Out { + r.into_iter().map(T::extract).collect() + } +} + // helper to implement `RequestArg` and `From` for a single request kind. macro_rules! impl_single { ($variant: ident, $me: ty, $out: ty) => { @@ -173,6 +185,50 @@ mod impls { impl_args!(A, B, C, D, E, F, G, H, I, J, K, L,); } +/// A block header to be used for verification. +/// May be stored or an unresolved output of a prior request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum HeaderRef { + /// A stored header. + Stored(encoded::Header), + /// An unresolved header. The first item here is the index of the request which + /// will return the header. The second is a back-reference pointing to a block hash + /// which can be used to make requests until that header is resolved. + Unresolved(usize, Field), +} + +impl HeaderRef { + /// Attempt to inspect the header. + pub fn as_ref(&self) -> Result<&encoded::Header, Error> { + match *self { + HeaderRef::Stored(ref hdr) => Ok(hdr), + HeaderRef::Unresolved(idx, _) => Err(Error::UnresolvedHeader(idx)), + } + } + + // get the blockhash field to be used in requests. 
+ fn field(&self) -> Field { + match *self { + HeaderRef::Stored(ref hdr) => Field::Scalar(hdr.hash()), + HeaderRef::Unresolved(_, ref field) => field.clone(), + } + } + + // yield the index of the request which will produce the header. + fn needs_header(&self) -> Option<(usize, Field)> { + match *self { + HeaderRef::Stored(_) => None, + HeaderRef::Unresolved(idx, ref field) => Some((idx, field.clone())), + } + } +} + +impl From for HeaderRef { + fn from(header: encoded::Header) -> Self { + HeaderRef::Stored(header) + } +} + /// Requests coupled with their required data for verification. /// This is used internally but not part of the public API. #[derive(Clone)] @@ -192,7 +248,7 @@ impl From for CheckedRequest { match req { Request::HeaderByHash(req) => { let net_req = net_request::IncompleteHeadersRequest { - start: net_request::HashOrNumber::Hash(req.0).into(), + start: req.0.map(Into::into), skip: 0, max: 1, reverse: false, @@ -207,33 +263,33 @@ impl From for CheckedRequest { } Request::Body(req) => { let net_req = net_request::IncompleteBodyRequest { - hash: req.hash.into(), + hash: req.0.field(), }; CheckedRequest::Body(req, net_req) } Request::Receipts(req) => { let net_req = net_request::IncompleteReceiptsRequest { - hash: req.0.hash().into(), + hash: req.0.field(), }; CheckedRequest::Receipts(req, net_req) } - Request::Account(req) => { + Request::Account(req) => { let net_req = net_request::IncompleteAccountRequest { - block_hash: req.header.hash().into(), + block_hash: req.header.field(), address_hash: ::util::Hashable::sha3(&req.address).into(), }; CheckedRequest::Account(req, net_req) } Request::Code(req) => { let net_req = net_request::IncompleteCodeRequest { - block_hash: req.block_id.0.into(), + block_hash: req.header.field(), code_hash: req.code_hash.into(), }; CheckedRequest::Code(req, net_req) } Request::Execution(req) => { let net_req = net_request::IncompleteExecutionRequest { - block_hash: req.header.hash().into(), + block_hash: req.header.field(), from: req.tx.sender(), gas: req.tx.gas, gas_price: req.tx.gas_price, @@ -262,6 +318,119 @@ impl CheckedRequest { CheckedRequest::Execution(_, req) => NetRequest::Execution(req), } } + + /// Whether this needs a header from a prior request. + /// Returns `Some` with the index of the request returning the header + /// and the field giving the hash + /// if so, `None` otherwise. + pub fn needs_header(&self) -> Option<(usize, Field)> { + match *self { + CheckedRequest::Receipts(ref x, _) => x.0.needs_header(), + CheckedRequest::Body(ref x, _) => x.0.needs_header(), + CheckedRequest::Account(ref x, _) => x.header.needs_header(), + CheckedRequest::Code(ref x, _) => x.header.needs_header(), + CheckedRequest::Execution(ref x, _) => x.header.needs_header(), + _ => None, + } + } + + /// Provide a header where one was needed. Should only be called if `needs_header` + /// returns `Some`, and for correctness, only use the header yielded by the correct + /// request. + pub fn provide_header(&mut self, header: encoded::Header) { + match *self { + CheckedRequest::Receipts(ref mut x, _) => x.0 = HeaderRef::Stored(header), + CheckedRequest::Body(ref mut x, _) => x.0 = HeaderRef::Stored(header), + CheckedRequest::Account(ref mut x, _) => x.header = HeaderRef::Stored(header), + CheckedRequest::Code(ref mut x, _) => x.header = HeaderRef::Stored(header), + CheckedRequest::Execution(ref mut x, _) => x.header = HeaderRef::Stored(header), + _ => {}, + } + } + + /// Attempt to complete the request based on data in the cache. 
+ pub fn respond_local(&self, cache: &Mutex<::cache::Cache>) -> Option { + match *self { + CheckedRequest::HeaderProof(ref check, _) => { + let mut cache = cache.lock(); + cache.block_hash(&check.num) + .and_then(|h| cache.chain_score(&h).map(|s| (h, s))) + .map(|(h, s)| Response::HeaderProof((h, s))) + } + CheckedRequest::HeaderByHash(_, ref req) => { + if let Some(&net_request::HashOrNumber::Hash(ref h)) = req.start.as_ref() { + return cache.lock().block_header(h).map(Response::HeaderByHash); + } + + None + } + CheckedRequest::Receipts(ref check, ref req) => { + // empty transactions -> no receipts + if check.0.as_ref().ok().map_or(false, |hdr| hdr.receipts_root() == SHA3_NULL_RLP) { + return Some(Response::Receipts(Vec::new())); + } + + req.hash.as_ref() + .and_then(|hash| cache.lock().block_receipts(hash)) + .map(Response::Receipts) + } + CheckedRequest::Body(ref check, ref req) => { + // check for empty body. + if let Some(hdr) = check.0.as_ref().ok() { + if hdr.transactions_root() == SHA3_NULL_RLP && hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { + let mut stream = RlpStream::new_list(3); + stream.append_raw(hdr.rlp().as_raw(), 1); + stream.begin_list(0); + stream.begin_list(0); + + return Some(Response::Body(encoded::Block::new(stream.out()))); + } + } + + // otherwise, check for cached body and header. + let block_hash = req.hash.as_ref() + .cloned() + .or_else(|| check.0.as_ref().ok().map(|hdr| hdr.hash())); + let block_hash = match block_hash { + Some(hash) => hash, + None => return None, + }; + + let mut cache = cache.lock(); + let cached_header; + + // can't use as_ref here although it seems like you would be able to: + // it complains about uninitialized `cached_header`. + let block_header = match check.0.as_ref().ok() { + Some(hdr) => Some(hdr), + None => { + cached_header = cache.block_header(&block_hash); + cached_header.as_ref() + } + }; + + block_header + .and_then(|hdr| cache.block_body(&block_hash).map(|b| (hdr, b))) + .map(|(hdr, body)| { + let mut stream = RlpStream::new_list(3); + let body = body.rlp(); + stream.append_raw(&hdr.rlp().as_raw(), 1); + stream.append_raw(&body.at(0).as_raw(), 1); + stream.append_raw(&body.at(1).as_raw(), 1); + + Response::Body(encoded::Block::new(stream.out())) + }) + } + CheckedRequest::Code(_, ref req) => { + if req.code_hash.as_ref().map_or(false, |&h| h == SHA3_EMPTY) { + Some(Response::Code(Vec::new())) + } else { + None + } + } + _ => None, + } + } } macro_rules! match_me { @@ -279,37 +448,40 @@ macro_rules! match_me { } impl IncompleteRequest for CheckedRequest { - type Complete = net_request::CompleteRequest; + type Complete = CompleteRequest; type Response = net_request::Response; - /// Check prior outputs against the needed inputs. - /// - /// This is called to ensure consistency of this request with - /// others in the same packet. - fn check_outputs(&self, f: F) -> Result<(), net_request::NoSuchOutput> + fn check_outputs(&self, mut f: F) -> Result<(), net_request::NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), net_request::NoSuchOutput> { - match_me!(*self, (_, ref req) => req.check_outputs(f)) + match *self { + CheckedRequest::HeaderProof(_, ref req) => req.check_outputs(f), + CheckedRequest::HeaderByHash(ref check, ref req) => { + req.check_outputs(&mut f)?; + + // make sure the output given is definitively a hash. 
+ match check.0 { + Field::BackReference(r, idx) => f(r, idx, OutputKind::Hash), + _ => Ok(()), + } + } + CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), + CheckedRequest::Body(_, ref req) => req.check_outputs(f), + CheckedRequest::Account(_, ref req) => req.check_outputs(f), + CheckedRequest::Code(_, ref req) => req.check_outputs(f), + CheckedRequest::Execution(_, ref req) => req.check_outputs(f), + } } - /// Note that this request will produce the following outputs. fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { match_me!(*self, (_, ref req) => req.note_outputs(f)) } - /// Fill fields of the request. - /// - /// This function is provided an "output oracle" which allows fetching of - /// prior request outputs. - /// Only outputs previously checked with `check_outputs` may be available. fn fill(&mut self, f: F) where F: Fn(usize, usize) -> Result { match_me!(*self, (_, ref mut req) => req.fill(f)) } - /// Will succeed if all fields have been filled, will fail otherwise. fn complete(self) -> Result { - use ::request::CompleteRequest; - match self { CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof), CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers), @@ -333,35 +505,42 @@ impl net_request::CheckedRequest for CheckedRequest { type Environment = Mutex<::cache::Cache>; /// Check whether the response matches (beyond the type). - fn check_response(&self, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result { + fn check_response(&self, complete: &Self::Complete, cache: &Mutex<::cache::Cache>, response: &Self::Response) -> Result { use ::request::Response as NetResponse; // helper for expecting a specific response for a given request. macro_rules! expect { - ($res: pat => $e: expr) => { - match *response { + ($res: pat => $e: expr) => {{ + match (response, complete) { $res => $e, _ => Err(Error::WrongKind), } - } + }} } // check response against contained prover. 
match *self { - CheckedRequest::HeaderProof(ref prover, _) => expect!(NetResponse::HeaderProof(ref res) => - prover.check_response(cache, &res.proof).map(Response::HeaderProof)), - CheckedRequest::HeaderByHash(ref prover, _) => expect!(NetResponse::Headers(ref res) => - prover.check_response(cache, &res.headers).map(Response::HeaderByHash)), - CheckedRequest::Receipts(ref prover, _) => expect!(NetResponse::Receipts(ref res) => - prover.check_response(cache, &res.receipts).map(Response::Receipts)), - CheckedRequest::Body(ref prover, _) => expect!(NetResponse::Body(ref res) => - prover.check_response(cache, &res.body).map(Response::Body)), - CheckedRequest::Account(ref prover, _) => expect!(NetResponse::Account(ref res) => - prover.check_response(cache, &res.proof).map(Response::Account)), - CheckedRequest::Code(ref prover, _) => expect!(NetResponse::Code(ref res) => - prover.check_response(cache, &res.code).map(Response::Code)), - CheckedRequest::Execution(ref prover, _) => expect!(NetResponse::Execution(ref res) => - prover.check_response(cache, &res.items).map(Response::Execution)), + CheckedRequest::HeaderProof(ref prover, _) => + expect!((&NetResponse::HeaderProof(ref res), _) => + prover.check_response(cache, &res.proof).map(Response::HeaderProof)), + CheckedRequest::HeaderByHash(ref prover, _) => + expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => + prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderByHash)), + CheckedRequest::Receipts(ref prover, _) => + expect!((&NetResponse::Receipts(ref res), _) => + prover.check_response(cache, &res.receipts).map(Response::Receipts)), + CheckedRequest::Body(ref prover, _) => + expect!((&NetResponse::Body(ref res), _) => + prover.check_response(cache, &res.body).map(Response::Body)), + CheckedRequest::Account(ref prover, _) => + expect!((&NetResponse::Account(ref res), _) => + prover.check_response(cache, &res.proof).map(Response::Account)), + CheckedRequest::Code(ref prover, _) => + expect!((&NetResponse::Code(ref res), &CompleteRequest::Code(ref req)) => + prover.check_response(cache, &req.code_hash, &res.code).map(Response::Code)), + CheckedRequest::Execution(ref prover, _) => + expect!((&NetResponse::Execution(ref res), _) => + prover.check_response(cache, &res.items).map(Response::Execution)), } } } @@ -387,6 +566,23 @@ pub enum Response { Execution(super::ExecutionResult), } +impl net_request::ResponseLike for Response { + fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + match *self { + Response::HeaderProof((ref hash, _)) => f(0, Output::Hash(*hash)), + Response::Account(None) => { + f(0, Output::Hash(SHA3_EMPTY)); // code hash + f(1, Output::Hash(SHA3_NULL_RLP)); // storage root. + } + Response::Account(Some(ref acc)) => { + f(0, Output::Hash(acc.code_hash)); + f(1, Output::Hash(acc.storage_root)); + } + _ => {} + } + } +} + /// Errors in verification. #[derive(Debug, PartialEq)] pub enum Error { @@ -398,6 +594,10 @@ pub enum Error { Trie(TrieError), /// Bad inclusion proof BadProof, + /// Header by number instead of hash. + HeaderByNumber, + /// Unresolved header reference. + UnresolvedHeader(usize), /// Wrong header number. WrongNumber(u64, u64), /// Wrong hash. @@ -468,62 +668,63 @@ impl HeaderProof { /// Request for a header by hash. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct HeaderByHash(pub H256); +pub struct HeaderByHash(pub Field); impl HeaderByHash { /// Check a response for the header. 
- pub fn check_response(&self, cache: &Mutex<::cache::Cache>, headers: &[encoded::Header]) -> Result { + pub fn check_response( + &self, + cache: &Mutex<::cache::Cache>, + start: &net_request::HashOrNumber, + headers: &[encoded::Header] + ) -> Result { + let expected_hash = match (self.0, start) { + (Field::Scalar(ref h), &net_request::HashOrNumber::Hash(ref h2)) => { + if h != h2 { return Err(Error::WrongHash(*h, *h2)) } + *h + } + (_, &net_request::HashOrNumber::Hash(h2)) => h2, + _ => return Err(Error::HeaderByNumber), + }; + let header = headers.get(0).ok_or(Error::Empty)?; let hash = header.sha3(); - match hash == self.0 { + match hash == expected_hash { true => { cache.lock().insert_block_header(hash, header.clone()); Ok(header.clone()) } - false => Err(Error::WrongHash(self.0, hash)), + false => Err(Error::WrongHash(expected_hash, hash)), } } } -/// Request for a block, with header and precomputed hash. +/// Request for a block, with header for verification. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Body { - /// The block's header. - pub header: encoded::Header, - /// The block's hash. - pub hash: H256, -} +pub struct Body(pub HeaderRef); impl Body { - /// Create a request for a block body from a given header. - pub fn new(header: encoded::Header) -> Self { - let hash = header.hash(); - Body { - header: header, - hash: hash, - } - } - /// Check a response for this block body. pub fn check_response(&self, cache: &Mutex<::cache::Cache>, body: &encoded::Body) -> Result { // check the integrity of the the body against the header + let header = self.0.as_ref()?; let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); - if tx_root != self.header.transactions_root() { - return Err(Error::WrongTrieRoot(self.header.transactions_root(), tx_root)); + if tx_root != header.transactions_root() { + return Err(Error::WrongTrieRoot(header.transactions_root(), tx_root)); } let uncles_hash = body.rlp().at(1).as_raw().sha3(); - if uncles_hash != self.header.uncles_hash() { - return Err(Error::WrongHash(self.header.uncles_hash(), uncles_hash)); + if uncles_hash != header.uncles_hash() { + return Err(Error::WrongHash(header.uncles_hash(), uncles_hash)); } // concatenate the header and the body. let mut stream = RlpStream::new_list(3); - stream.append_raw(self.header.rlp().as_raw(), 1); + stream.append_raw(header.rlp().as_raw(), 1); stream.append_raw(body.rlp().at(0).as_raw(), 1); stream.append_raw(body.rlp().at(1).as_raw(), 1); - cache.lock().insert_block_body(self.hash, body.clone()); + cache.lock().insert_block_body(header.hash(), body.clone()); Ok(encoded::Block::new(stream.out())) } @@ -531,12 +732,12 @@ impl Body { /// Request for a block's receipts with header for verification. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct BlockReceipts(pub encoded::Header); +pub struct BlockReceipts(pub HeaderRef); impl BlockReceipts { /// Check a response with receipts against the stored header. pub fn check_response(&self, cache: &Mutex<::cache::Cache>, receipts: &[Receipt]) -> Result, Error> { - let receipts_root = self.0.receipts_root(); + let receipts_root = self.0.as_ref()?.receipts_root(); let found_root = ::util::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).to_vec())); match receipts_root == found_root { @@ -553,7 +754,7 @@ impl BlockReceipts { #[derive(Debug, Clone, PartialEq, Eq)] pub struct Account { /// Header for verification. - pub header: encoded::Header, + pub header: HeaderRef, /// Address requested. 
pub address: Address, } @@ -561,7 +762,8 @@ pub struct Account { impl Account { /// Check a response with an account against the stored header. pub fn check_response(&self, _: &Mutex<::cache::Cache>, proof: &[Bytes]) -> Result, Error> { - let state_root = self.header.state_root(); + let header = self.header.as_ref()?; + let state_root = header.state_root(); let mut db = MemoryDB::new(); for node in proof { db.insert(&node[..]); } @@ -584,20 +786,25 @@ impl Account { /// Request for account code. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Code { - /// Block hash, number pair. - pub block_id: (H256, u64), + /// Header reference. + pub header: HeaderRef, /// Account's code hash. - pub code_hash: H256, + pub code_hash: Field, } impl Code { /// Check a response with code against the code hash. - pub fn check_response(&self, _: &Mutex<::cache::Cache>, code: &[u8]) -> Result, Error> { + pub fn check_response( + &self, + _: &Mutex<::cache::Cache>, + code_hash: &H256, + code: &[u8] + ) -> Result, Error> { let found_hash = code.sha3(); - if found_hash == self.code_hash { + if &found_hash == code_hash { Ok(code.to_vec()) } else { - Err(Error::WrongHash(self.code_hash, found_hash)) + Err(Error::WrongHash(*code_hash, found_hash)) } } } @@ -608,8 +815,9 @@ pub struct TransactionProof { /// The transaction to request proof of. pub tx: SignedTransaction, /// Block header. - pub header: encoded::Header, + pub header: HeaderRef, /// Transaction environment info. + // TODO: it's not really possible to provide this if the header is unknown. pub env_info: EnvInfo, /// Consensus engine. pub engine: Arc, @@ -618,7 +826,7 @@ pub struct TransactionProof { impl TransactionProof { /// Check the proof, returning the proved execution or indicate that the proof was bad. pub fn check_response(&self, _: &Mutex<::cache::Cache>, state_items: &[DBValue]) -> Result { - let root = self.header.state_root(); + let root = self.header.as_ref()?.state_root(); let mut env_info = self.env_info.clone(); env_info.gas_limit = self.tx.gas.clone(); @@ -697,7 +905,7 @@ mod tests { let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); let cache = Mutex::new(make_cache()); - assert!(HeaderByHash(hash).check_response(&cache, &[raw_header]).is_ok()) + assert!(HeaderByHash(hash.into()).check_response(&cache, &hash.into(), &[raw_header]).is_ok()) } #[test] @@ -708,10 +916,7 @@ mod tests { let mut body_stream = RlpStream::new_list(2); body_stream.begin_list(0).begin_list(0); - let req = Body { - header: encoded::Header::new(::rlp::encode(&header).to_vec()), - hash: header.hash(), - }; + let req = Body(encoded::Header::new(::rlp::encode(&header).to_vec()).into()); let cache = Mutex::new(make_cache()); let response = encoded::Body::new(body_stream.drain().to_vec()); @@ -734,7 +939,7 @@ mod tests { header.set_receipts_root(receipts_root); - let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header).to_vec())); + let req = BlockReceipts(encoded::Header::new(::rlp::encode(&header).to_vec()).into()); let cache = Mutex::new(make_cache()); assert!(req.check_response(&cache, &receipts).is_ok()) @@ -782,7 +987,7 @@ mod tests { header.set_state_root(root.clone()); let req = Account { - header: encoded::Header::new(::rlp::encode(&header).to_vec()), + header: encoded::Header::new(::rlp::encode(&header).to_vec()).into(), address: addr, }; @@ -793,13 +998,15 @@ mod tests { #[test] fn check_code() { let code = vec![1u8; 256]; + let code_hash = ::util::Hashable::sha3(&code); + let header = Header::new(); let req = Code { - 
block_id: (Default::default(), 2), - code_hash: ::util::Hashable::sha3(&code), + header: encoded::Header::new(::rlp::encode(&header).to_vec()).into(), + code_hash: code_hash.into(), }; let cache = Mutex::new(make_cache()); - assert!(req.check_response(&cache, &code).is_ok()); - assert!(req.check_response(&cache, &[]).is_err()); + assert!(req.check_response(&cache, &code_hash, &code).is_ok()); + assert!(req.check_response(&cache, &code_hash, &[]).is_err()); } } diff --git a/ethcore/light/src/on_demand/tests.rs b/ethcore/light/src/on_demand/tests.rs index d5789c5e1..10c4ceae5 100644 --- a/ethcore/light/src/on_demand/tests.rs +++ b/ethcore/light/src/on_demand/tests.rs @@ -28,7 +28,7 @@ use ::request::{self as basic_request, Response}; use std::sync::Arc; -use super::{request, OnDemand, Peer}; +use super::{request, OnDemand, Peer, HeaderRef}; // useful contexts to give the service. enum Context { @@ -122,7 +122,10 @@ fn dummy_capabilities() -> Capabilities { #[test] fn detects_hangup() { let on_demand = Harness::create().service; - let result = on_demand.header_by_hash(&Context::NoOp, request::HeaderByHash(H256::default())); + let result = on_demand.request_raw( + &Context::NoOp, + vec![request::HeaderByHash(H256::default().into()).into()], + ); assert_eq!(on_demand.pending.read().len(), 1); drop(result); @@ -148,7 +151,7 @@ fn single_request() { let recv = harness.service.request_raw( &Context::NoOp, - vec![request::HeaderByHash(header.hash()).into()] + vec![request::HeaderByHash(header.hash().into()).into()] ).unwrap(); assert_eq!(harness.service.pending.read().len(), 1); @@ -182,7 +185,7 @@ fn no_capabilities() { let _recv = harness.service.request_raw( &Context::NoOp, - vec![request::HeaderByHash(Default::default()).into()] + vec![request::HeaderByHash(H256::default().into()).into()] ).unwrap(); assert_eq!(harness.service.pending.read().len(), 1); @@ -209,7 +212,7 @@ fn reassign() { let recv = harness.service.request_raw( &Context::NoOp, - vec![request::HeaderByHash(header.hash()).into()] + vec![request::HeaderByHash(header.hash().into()).into()] ).unwrap(); assert_eq!(harness.service.pending.read().len(), 1); @@ -264,8 +267,8 @@ fn partial_response() { let recv = harness.service.request_raw( &Context::NoOp, vec![ - request::HeaderByHash(header1.hash()).into(), - request::HeaderByHash(header2.hash()).into(), + request::HeaderByHash(header1.hash().into()).into(), + request::HeaderByHash(header2.hash().into()).into(), ], ).unwrap(); @@ -323,8 +326,8 @@ fn part_bad_part_good() { let recv = harness.service.request_raw( &Context::NoOp, vec![ - request::HeaderByHash(header1.hash()).into(), - request::HeaderByHash(header2.hash()).into(), + request::HeaderByHash(header1.hash().into()).into(), + request::HeaderByHash(header2.hash().into()).into(), ], ).unwrap(); @@ -378,7 +381,7 @@ fn wrong_kind() { let _recv = harness.service.request_raw( &Context::NoOp, - vec![request::HeaderByHash(Default::default()).into()] + vec![request::HeaderByHash(H256::default().into()).into()] ).unwrap(); assert_eq!(harness.service.pending.read().len(), 1); @@ -395,3 +398,100 @@ fn wrong_kind() { assert_eq!(harness.service.pending.read().len(), 1); } + +#[test] +fn back_references() { + let harness = Harness::create(); + + let peer_id = 10101; + let req_id = ReqId(14426); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let header = Header::default(); + let encoded = encoded::Header::new(header.rlp(Seal::With)); + + let recv = harness.service.request_raw( + 
&Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), + ] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_id, + &[ + Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] }), + Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }), + ] + ); + + assert!(recv.wait().is_ok()); +} + +#[test] +#[should_panic] +fn bad_back_reference() { + let harness = Harness::create(); + + let header = Header::default(); + + let _ = harness.service.request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(), + ] + ).unwrap(); +} + +#[test] +fn fill_from_cache() { + let harness = Harness::create(); + + let peer_id = 10101; + let req_id = ReqId(14426); + + harness.inject_peer(peer_id, Peer { + status: dummy_status(), + capabilities: dummy_capabilities(), + }); + + let header = Header::default(); + let encoded = encoded::Header::new(header.rlp(Seal::With)); + + let recv = harness.service.request_raw( + &Context::NoOp, + vec![ + request::HeaderByHash(header.hash().into()).into(), + request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(), + ] + ).unwrap(); + + assert_eq!(harness.service.pending.read().len(), 1); + + harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id)); + + assert_eq!(harness.service.pending.read().len(), 0); + + harness.service.on_responses( + &Context::WithPeer(peer_id), + req_id, + &[ + Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] }), + ] + ); + + assert!(recv.wait().is_ok()); +} diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index dff33513a..0b413677d 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -19,6 +19,7 @@ //! supplied as well. use std::collections::HashMap; +use std::ops::{Deref, DerefMut}; use request::{ IncompleteRequest, OutputKind, Output, NoSuchOutput, ResponseError, ResponseLike, }; @@ -124,23 +125,14 @@ impl Requests { req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) } } -} -impl Requests { - /// Supply a response for the next request. - /// Fails on: wrong request kind, all requests answered already. - pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response) - -> Result> - { - let idx = self.answered; - - // check validity. - if self.is_complete() { return Err(ResponseError::Unexpected) } - - let extracted = self.requests[idx] - .check_response(env, response).map_err(ResponseError::Validity)?; + /// Supply a response, asserting its correctness. + /// Fill outputs based upon it. + pub fn supply_response_unchecked(&mut self, response: &R) { + if self.is_complete() { return } let outputs = &mut self.outputs; + let idx = self.answered; response.fill_outputs(|out_idx, output| { // we don't need to check output kinds here because all back-references // are validated in the builder. 
@@ -154,7 +146,26 @@ impl Requests { if let Some(ref mut req) = self.requests.get_mut(self.answered) { req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) } + } +} +impl Requests { + /// Supply a response for the next request. + /// Fails on: wrong request kind, all requests answered already. + pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response) + -> Result> + { + let idx = self.answered; + + // check validity. + if idx == self.requests.len() { return Err(ResponseError::Unexpected) } + let completed = self.next_complete() + .expect("only fails when all requests have been answered; this just checked against; qed"); + + let extracted = self.requests[idx] + .check_response(&completed, env, response).map_err(ResponseError::Validity)?; + + self.supply_response_unchecked(response); Ok(extracted) } } @@ -182,6 +193,20 @@ impl Requests { } } +impl Deref for Requests { + type Target = [T]; + + fn deref(&self) -> &[T] { + &self.requests[..] + } +} + +impl DerefMut for Requests { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.requests[..] + } +} + #[cfg(test)] mod tests { use request::*; diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 3953aa88b..5b15a0180 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -83,7 +83,7 @@ pub enum ResponseError { } /// An input to a request. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Field { /// A pre-specified input. Scalar(T), @@ -93,6 +93,29 @@ pub enum Field { } impl Field { + /// Helper for creating a new back-reference field. + pub fn back_ref(idx: usize, req: usize) -> Self { + Field::BackReference(idx, req) + } + + /// map a scalar into some other item. + pub fn map(self, f: F) -> Field where F: FnOnce(T) -> U { + match self { + Field::Scalar(x) => Field::Scalar(f(x)), + Field::BackReference(req, idx) => Field::BackReference(req, idx), + } + } + + /// Attempt to get a reference to the inner scalar. + pub fn as_ref(&self) -> Option<&T> { + match *self { + Field::Scalar(ref x) => Some(x), + Field::BackReference(_, _) => None, + } + } + + + // attempt conversion into scalar value. fn into_scalar(self) -> Result { match self { @@ -400,7 +423,7 @@ impl CheckedRequest for Request { type Error = WrongKind; type Environment = (); - fn check_response(&self, _: &(), response: &Response) -> Result<(), WrongKind> { + fn check_response(&self, _: &Self::Complete, _: &(), response: &Response) -> Result<(), WrongKind> { if self.kind() == response.kind() { Ok(()) } else { @@ -587,7 +610,7 @@ pub trait CheckedRequest: IncompleteRequest { type Environment; /// Check whether the response matches (beyond the type). - fn check_response(&self, &Self::Environment, &Self::Response) -> Result; + fn check_response(&self, &Self::Complete, &Self::Environment, &Self::Response) -> Result; } /// A response-like object. 
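
The `Field` type above is the core of the new back-reference machinery: every hash-like input to a request is either a concrete scalar or a pointer at the output of an earlier request in the same batch, and `Requests::supply_response_unchecked` fills those pointers in as responses arrive in order. The following is a minimal, self-contained sketch of that idea only; the types are deliberate toy stand-ins (plain integers instead of hashes) and not the crate's API.

// Toy model of back-reference resolution (hypothetical types, not the crate's):
// a field is either a known value or a reference to an earlier request's output,
// and answering the batch in order resolves later fields from earlier outputs.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Field {
	Scalar(u64),           // a pre-specified value (stand-in for a block hash)
	BackReference(usize),  // index of the request whose output supplies it
}

struct Request {
	needs: Field,
}

fn answer_all(mut requests: Vec<Request>) -> Vec<u64> {
	let mut outputs: Vec<u64> = Vec::new();
	for i in 0..requests.len() {
		// resolve a back-reference from an already-produced output.
		if let Field::BackReference(idx) = requests[i].needs {
			assert!(idx < i, "back-references may only point backwards");
			requests[i].needs = Field::Scalar(outputs[idx]);
		}
		// "answer" the request; its output becomes available to later requests.
		let out = match requests[i].needs {
			Field::Scalar(x) => x,
			Field::BackReference(_) => unreachable!("resolved above"),
		};
		outputs.push(out);
	}
	outputs
}

fn main() {
	// request 0 carries a known value; request 1 depends on request 0's output.
	let reqs = vec![
		Request { needs: Field::Scalar(0xdead_beef) },
		Request { needs: Field::BackReference(0) },
	];
	assert_eq!(answer_all(reqs), vec![0xdead_beef, 0xdead_beef]);
}
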
diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index c643daa2a..090521ba5 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -27,7 +27,7 @@ use light::client::Client; use light::on_demand::{request, OnDemand}; use light::TransactionQueue; -use futures::{future, stream, Future, Stream}; +use futures::{future, Future}; use parity_reactor::Remote; @@ -73,28 +73,32 @@ impl IoHandler for QueueCull { self.remote.spawn_with_timeout(move || { let maybe_fetching = sync.with_context(move |ctx| { // fetch the nonce of each sender in the queue. - let nonce_futures = senders.iter() - .map(|&address| request::Account { header: best_header.clone(), address: address }) - .map(move |request| { - on_demand.account(ctx, request) - .map(move |maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce)) - }) - .zip(senders.iter()) - .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); + let nonce_reqs = senders.iter() + .map(|&address| request::Account { header: best_header.clone().into(), address: address }) + .collect::>(); - // as they come in, update each sender to the new nonce. - stream::futures_unordered(nonce_futures) - .fold(txq, |txq, (address, nonce)| { - txq.write().cull(address, nonce); - future::ok(txq) + // when they come in, update each sender to the new nonce. + on_demand.request(ctx, nonce_reqs) + .expect("No back-references; therefore all back-references are valid; qed") + .map(move |accs| { + let txq = txq.write(); + let _ = accs.into_iter() + .map(|maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce)) + .zip(senders) + .fold(txq, |mut txq, (nonce, addr)| { + txq.cull(addr, nonce); + txq + }); }) - .map(|_| ()) // finally, discard the txq handle and log errors. .map_err(|_| debug!(target: "cull", "OnDemand prematurely closed channel.")) }); match maybe_fetching { Some(fut) => fut.boxed(), - None => future::ok(()).boxed(), + None => { + debug!(target: "cull", "Unable to acquire network context; qed"); + future::ok(()).boxed() + } } }, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) } diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 68fff78b7..efd118967 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -20,7 +20,7 @@ use std::fmt::Debug; use std::ops::Deref; use std::sync::{Arc, Weak}; -use futures::{future, stream, Future, Stream, BoxFuture}; +use futures::{future, Future, BoxFuture}; use light::cache::Cache as LightDataCache; use light::client::LightChainClient; use light::on_demand::{request, OnDemand}; @@ -185,25 +185,28 @@ pub fn fetch_gas_price_corpus( let eventual_corpus = sync.with_context(|ctx| { // get some recent headers with gas used, // and request each of the blocks from the network. 
- let block_futures = client.ancestry_iter(BlockId::Latest) + let block_requests = client.ancestry_iter(BlockId::Latest) .filter(|hdr| hdr.gas_used() != U256::default()) .take(GAS_PRICE_SAMPLE_SIZE) - .map(request::Body::new) - .map(|req| on_demand.block(ctx, req)); + .map(|hdr| request::Body(hdr.into())) + .collect::>(); - // as the blocks come in, collect gas prices into a vector - stream::futures_unordered(block_futures) - .fold(Vec::new(), |mut v, block| { - for t in block.transaction_views().iter() { - v.push(t.gas_price()) - } + // when the blocks come in, collect gas prices into a vector + on_demand.request(ctx, block_requests) + .expect("no back-references; therefore all back-references are valid; qed") + .map(|bodies| { + bodies.into_iter().fold(Vec::new(), |mut v, block| { + for t in block.transaction_views().iter() { + v.push(t.gas_price()) + } - future::ok(v) + v + }) }) - .map(move |v| { + .map(move |prices| { // produce a corpus from the vector, cache it, and return // the median as the intended gas price. - let corpus: ::stats::Corpus<_> = v.into(); + let corpus: ::stats::Corpus<_> = prices.into(); cache.lock().set_gas_price_corpus(corpus.clone()); corpus }) @@ -282,10 +285,10 @@ impl LightDispatcher { let best_header = self.client.best_block_header(); let account_start_nonce = self.client.engine().account_start_nonce(); - let nonce_future = self.sync.with_context(|ctx| self.on_demand.account(ctx, request::Account { - header: best_header, + let nonce_future = self.sync.with_context(|ctx| self.on_demand.request(ctx, request::Account { + header: best_header.into(), address: addr, - })); + }).expect("no back-references; therefore all back-references valid; qed")); match nonce_future { Some(x) => diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 7132106cb..8bcd5115a 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -31,7 +31,8 @@ use jsonrpc_macros::Trailing; use light::cache::Cache; use light::client::LightChainClient; use light::cht; -use light::on_demand::{OnDemand, request}; +use light::on_demand::{request, OnDemand, HeaderRef, Request as OnDemandRequest, Response as OnDemandResponse}; +use light::request::Field; use ethsync::LightSync; use util::{Address, Mutex, Uint, U256}; @@ -55,51 +56,72 @@ pub struct LightFetch { /// Type alias for convenience. pub type ExecutionResult = Result; +// extract the header indicated by the given `HeaderRef` from the given responses. +// fails only if they do not correspond. +fn extract_header(res: &[OnDemandResponse], header: HeaderRef) -> Option { + match header { + HeaderRef::Stored(hdr) => Some(hdr), + HeaderRef::Unresolved(idx, _) => match res.get(idx) { + Some(&OnDemandResponse::HeaderByHash(ref hdr)) => Some(hdr.clone()), + _ => None, + }, + } +} + impl LightFetch { - /// Get a block header from the on demand service or client, or error. - pub fn header(&self, id: BlockId) -> BoxFuture { + // push the necessary requests onto the request chain to get the header by the given ID. + // yield a header reference which other requests can use. 
+ fn make_header_requests(&self, id: BlockId, reqs: &mut Vec) -> Result { if let Some(h) = self.client.block_header(id) { - return future::ok(h).boxed() + return Ok(h.into()); } - let maybe_future = match id { + match id { BlockId::Number(n) => { let cht_root = cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize)); match cht_root { - None => return future::err(errors::unknown_block()).boxed(), + None => Err(errors::unknown_block()), Some(root) => { let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - self.sync.with_context(|ctx| { - let fut = self.on_demand.hash_by_number(ctx, req) - .map(request::HeaderByHash) - .map_err(errors::on_demand_cancel); + let idx = reqs.len(); + let hash_ref = Field::back_ref(idx, 0); + reqs.push(req.into()); + reqs.push(request::HeaderByHash(hash_ref.clone()).into()); - fut.and_then(move |req| { - match sync.with_context(|ctx| on_demand.header_by_hash(ctx, req)) { - Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(), - None => future::err(errors::network_disabled()).boxed(), - } - }).boxed() - }) + Ok(HeaderRef::Unresolved(idx + 1, hash_ref)) } } } BlockId::Hash(h) => { - self.sync.with_context(|ctx| - self.on_demand.header_by_hash(ctx, request::HeaderByHash(h)) - .then(|res| future::done(match res { - Ok(h) => Ok(h), - Err(e) => Err(errors::on_demand_cancel(e)), - })) - .boxed() - ) + reqs.push(request::HeaderByHash(h.into()).into()); + + let idx = reqs.len(); + Ok(HeaderRef::Unresolved(idx, h.into())) } - _ => None, // latest, earliest, and pending will have all already returned. + _ => Err(errors::unknown_block()) // latest, earliest, and pending will have all already returned. + } + } + + /// Get a block header from the on demand service or client, or error. + pub fn header(&self, id: BlockId) -> BoxFuture { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return future::err(e).boxed(), }; + let maybe_future = self.sync.with_context(move |ctx| { + self.on_demand.request_raw(ctx, reqs) + .expect("all back-references known to be valid; qed") + .map(|res| extract_header(&res, header_ref) + .expect("these responses correspond to requests that header_ref belongs to. \ + therefore it will not fail; qed")) + .map_err(errors::on_demand_cancel) + .boxed() + }); + match maybe_future { Some(recv) => recv, None => future::err(errors::network_disabled()).boxed() @@ -109,19 +131,29 @@ impl LightFetch { /// helper for getting account info at a given block. /// `None` indicates the account doesn't exist at the given block. 
pub fn account(&self, address: Address, id: BlockId) -> BoxFuture, Error> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return future::err(e).boxed(), + }; - self.header(id).and_then(move |header| { - let maybe_fut = sync.with_context(|ctx| on_demand.account(ctx, request::Account { - header: header, - address: address, - })); + reqs.push(request::Account { header: header_ref, address: address }.into()); - match maybe_fut { - Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(), - None => future::err(errors::network_disabled()).boxed(), - } - }).boxed() + let maybe_future = self.sync.with_context(move |ctx| { + self.on_demand.request_raw(ctx, reqs) + .expect("all back-references known to be valid; qed") + .map(|mut res| match res.pop() { + Some(OnDemandResponse::Account(acc)) => acc, + _ => panic!("responses correspond directly with requests in amount and type; qed"), + }) + .map_err(errors::on_demand_cancel) + .boxed() + }); + + match maybe_future { + Some(recv) => recv, + None => future::err(errors::network_disabled()).boxed() + } } /// helper for getting proved execution. @@ -182,13 +214,16 @@ impl LightFetch { let request = request::TransactionProof { tx: tx, - header: hdr, + header: hdr.into(), env_info: env_info, engine: client.engine().clone(), }; let proved_future = sync.with_context(move |ctx| { - on_demand.transaction_proof(ctx, request).map_err(errors::on_demand_cancel).boxed() + on_demand + .request(ctx, request) + .expect("no back-references; therefore all back-refs valid; qed") + .map_err(errors::on_demand_cancel).boxed() }); match proved_future { @@ -200,13 +235,28 @@ impl LightFetch { /// get a block itself. fails on unknown block ID. pub fn block(&self, id: BlockId) -> BoxFuture { - let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone()); + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return future::err(e).boxed(), + }; - self.header(id).map(request::Body::new).and_then(move |req| { - match sync.with_context(move |ctx| on_demand.block(ctx, req)) { - Some(fut) => fut.map_err(errors::on_demand_cancel).boxed(), - None => future::err(errors::network_disabled()).boxed(), - } - }).boxed() + reqs.push(request::Body(header_ref).into()); + + let maybe_future = self.sync.with_context(move |ctx| { + self.on_demand.request_raw(ctx, reqs) + .expect("all back-references known to be valid; qed") + .map(|mut res| match res.pop() { + Some(OnDemandResponse::Body(b)) => b, + _ => panic!("responses correspond directly with requests in amount and type; qed"), + }) + .map_err(errors::on_demand_cancel) + .boxed() + }); + + match maybe_future { + Some(recv) => recv, + None => future::err(errors::network_disabled()).boxed() + } } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index e02ccc987..fc61b0605 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -59,6 +59,8 @@ use v1::metadata::Metadata; use util::Address; +const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; + /// Light client `ETH` (and filter) RPC. pub struct EthClient { sync: Arc, @@ -186,16 +188,17 @@ impl EthClient { // - network is down. // - we get a score, but our hash is non-canonical. // - we get a score, and our hash is canonical. 
- let maybe_fut = sync.with_context(move |ctx| on_demand.hash_and_score_by_number(ctx, req)); + let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS)); match maybe_fut { - Some(fut) => fut.map(move |(hash, score)| { + Some(fut) => fut + .map(move |(hash, score)| { let score = if hash == block.hash() { Some(score) } else { None }; - fill_rich(block, score) + fill_rich(block, score) }).map_err(errors::on_demand_cancel).boxed(), None => return future::err(errors::network_disabled()).boxed(), } @@ -295,7 +298,8 @@ impl Eth for EthClient { if hdr.transactions_root() == SHA3_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { - sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr))) + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) .map(|x| x.map_err(errors::on_demand_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) @@ -310,7 +314,8 @@ impl Eth for EthClient { if hdr.transactions_root() == SHA3_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { - sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr))) + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) .map(|x| x.map_err(errors::on_demand_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) @@ -325,7 +330,8 @@ impl Eth for EthClient { if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { - sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr))) + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) .map(|x| x.map_err(errors::on_demand_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) @@ -340,7 +346,8 @@ impl Eth for EthClient { if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { - sync.with_context(|ctx| on_demand.block(ctx, request::Body::new(hdr))) + sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) + .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) .map(|x| x.map_err(errors::on_demand_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) @@ -501,8 +508,8 @@ impl Filterable for EthClient { let hdr_bloom = hdr.log_bloom(); bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some() }) - .map(|hdr| (hdr.number(), request::BlockReceipts(hdr))) - .map(|(num, req)| self.on_demand.block_receipts(ctx, req).map(move |x| (num, x))) + .map(|hdr| (hdr.number(), request::BlockReceipts(hdr.into()))) + .map(|(num, req)| self.on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, x))) .collect(); // as the receipts come in, find logs within them which match the filter. 
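
Taken together, the on-demand changes in this patch make every submitted batch run against the local cache first and send only the unanswered tail to the network (`submit_pending` calling `Pending::answer_from_cache`). Local answering stops at the first miss because requests are ordered and later ones may depend on earlier outputs. Below is a simplified, standalone model of that loop, assuming plain integers in place of `CheckedRequest`s and the shared `Cache`; it is not the crate's implementation.

// Toy model of the cache-first answering loop: starting from the first
// unanswered request, answer from a local source until the first miss, then
// stop -- requests are strictly ordered, so a later cache hit cannot be used
// before an earlier miss has been answered by the network.
fn answer_from_cache(requests: &[u32], cache: &[u32]) -> Vec<u32> {
	let mut responses = Vec::new();
	for req in requests {
		match cache.iter().find(|&&c| c == *req) {
			Some(&hit) => responses.push(hit),
			None => break, // first miss: the rest of the batch goes to the network
		}
	}
	responses
}

fn main() {
	// requests 1 and 2 are served locally; 3 misses, so 3 and 4 are dispatched
	// to the network even though 4 happens to be cached.
	assert_eq!(answer_from_cache(&[1, 2, 3, 4], &[1, 2, 4]), vec![1, 2]);
}
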
From 53add78a283976f3a35fc356f17007ba0310c35c Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 23 May 2017 15:35:34 +0300 Subject: [PATCH 20/29] Fix build (#5684) * missing feature * dapps update --- ethcore/native_contracts/Cargo.toml | 1 + parity/dapps.rs | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ethcore/native_contracts/Cargo.toml b/ethcore/native_contracts/Cargo.toml index 57cca0923..c3ef59d31 100644 --- a/ethcore/native_contracts/Cargo.toml +++ b/ethcore/native_contracts/Cargo.toml @@ -16,3 +16,4 @@ native-contract-generator = { path = "generator" } [features] default = [] +test_contracts = [] \ No newline at end of file diff --git a/parity/dapps.rs b/parity/dapps.rs index a265a72b6..ff0dd2139 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -95,7 +95,7 @@ impl ContractClient for LightRegistrar { let maybe_future = self.sync.with_context(move |ctx| { self.on_demand - .transaction_proof(ctx, on_demand::request::TransactionProof { + .request(ctx, on_demand::request::TransactionProof { tx: Transaction { nonce: self.client.engine().account_start_nonce(), action: Action::Call(address), @@ -104,10 +104,11 @@ impl ContractClient for LightRegistrar { value: 0.into(), data: data, }.fake_sign(Address::default()), - header: header, + header: on_demand::request::HeaderRef::Stored(header), env_info: env_info, engine: self.client.engine().clone(), }) + .expect("todo: handle error") .then(|res| match res { Ok(Ok(executed)) => Ok(executed.output), Ok(Err(e)) => Err(format!("Failed to execute transaction: {}", e)), From 2092b805b6a72e5a39d01ff6776c774eee25c41e Mon Sep 17 00:00:00 2001 From: Micah Zoltu Date: Tue, 23 May 2017 05:37:27 -0700 Subject: [PATCH 21/29] Updated docs slightly. (#5674) These docs _really_ need a lot of improvement (like what are the options that go in `{}`, but at least they are now not outright incorrect. 
--- js/src/api/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/js/src/api/README.md b/js/src/api/README.md index b7c8240b0..1a2051eaf 100644 --- a/js/src/api/README.md +++ b/js/src/api/README.md @@ -109,9 +109,9 @@ contract.at('0xa9280...7347b'); find & call a function ```javascript -contract.named - .callMe - .call({ gas: 21000 }, [true, 'someString']) // or estimateGas or sendTransaction +contract.instance + .myContractMethodName + .call({}, [myContractMethodParameter]) // or estimateGas or sendTransaction .then((result) => { console.log(`the result was ${result}`); }); From 6b6555852ce1ad3f6d7733612da0aeeb1b52b452 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Tue, 23 May 2017 13:07:10 +0000 Subject: [PATCH 22/29] [ci skip] js-precompiled 20170523-130314 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc02a63e9..05a44cd6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1847,7 +1847,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#6597fc70499226546fdcb35e7c09f9347f4f3c07" +source = "git+https://github.com/paritytech/js-precompiled.git#c48daa735fc9267edfa2452bce3308fa63e1816a" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index e23ebf9cb..0c4c6dfa9 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.82", + "version": "1.7.83", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 2ab21acf114732c1681e4440f9d5b5a759d0ecc3 Mon Sep 17 00:00:00 2001 From: Afri Date: Tue, 23 May 2017 15:28:41 +0200 Subject: [PATCH 23/29] Update CHANGELOG for 1.6.7 (#5683) --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b52c4e1f3..d109cfdb0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,31 @@ +## Parity [v1.6.7](https://github.com/paritytech/parity/releases/tag/v1.6.7) (2017-05-18) + +This release addresses: + +- potential usability issues with [import and recovery of existing accounts](https://blog.parity.io/restoring-blank-seed-phrase/). +- canceling scheduled transactions via RPC or UI. +- warp sync issues with the Kovan network. 
+ +Full changelog: + +- Backporting to beta [#5657](https://github.com/paritytech/parity/pull/5657) + - Add CHANGELOG.md [#5513](https://github.com/paritytech/parity/pull/5513) + - Reorg into blocks before minimum history [#5558](https://github.com/paritytech/parity/pull/5558) + - Bump to v1.6.7 +- Cancel Transaction [#5656](https://github.com/paritytech/parity/pull/5656) + - option to disable persistent txqueue [#5544](https://github.com/paritytech/parity/pull/5544) + - Remove transaction RPC [#4949](https://github.com/paritytech/parity/pull/4949) + - Cancel tx JS [#4958](https://github.com/paritytech/parity/pull/4958) + - Updating documentation for RPCs [#5392](https://github.com/paritytech/parity/pull/5392) +- Backport Recover button [#5654](https://github.com/paritytech/parity/pull/5654) + - Backport [#5645](https://github.com/paritytech/parity/pull/5645) +- Add monotonic step to Kovan [#5630](https://github.com/paritytech/parity/pull/5630) + - Add monotonic transition to kovan [#5587](https://github.com/paritytech/parity/pull/5587) +- Fix ethsign [#5600](https://github.com/paritytech/parity/pull/5600) +- Registry backports [#5445](https://github.com/paritytech/parity/pull/5445) + - Fixes to the Registry dapp [#4984](https://github.com/paritytech/parity/pull/4984) + - Fix references to api outside of `parity.js` [#4981](https://github.com/paritytech/parity/pull/4981) + ## Parity [v1.6.6](https://github.com/paritytech/parity/releases/tag/v1.6.6) (2017-04-11) This release brings warp sync support for kovan network. From dd004aba9f9640091e2d9c3f95dd7c3142106a4d Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 23 May 2017 15:49:17 +0200 Subject: [PATCH 24/29] EIP-140 (#5477) --- ethcore/res/ethereum/metropolis_test.json | 51 ++++++++++++++++++ ethcore/res/ethereum/tests | 2 +- ethcore/src/ethereum/mod.rs | 3 ++ ethcore/src/evm/evm.rs | 34 +++++++++--- ethcore/src/evm/instructions.rs | 3 ++ ethcore/src/evm/interpreter/gasometer.rs | 2 +- ethcore/src/evm/interpreter/mod.rs | 31 ++++++++--- ethcore/src/evm/mod.rs | 2 +- ethcore/src/evm/schedule.rs | 4 ++ ethcore/src/evm/tests.rs | 2 +- ethcore/src/executive.rs | 63 +++++++++++++++++------ ethcore/src/json_tests/executive.rs | 4 +- ethcore/src/json_tests/state.rs | 2 +- ethcore/src/spec/spec.rs | 3 ++ ethcore/src/tests/helpers.rs | 9 +++- evmbin/src/main.rs | 8 +-- json/src/spec/params.rs | 3 ++ 17 files changed, 186 insertions(+), 40 deletions(-) create mode 100644 ethcore/res/ethereum/metropolis_test.json diff --git a/ethcore/res/ethereum/metropolis_test.json b/ethcore/res/ethereum/metropolis_test.json new file mode 100644 index 000000000..9b367c9f4 --- /dev/null +++ b/ethcore/res/ethereum/metropolis_test.json @@ -0,0 +1,51 @@ +{ + "name": "Metropolis (Test)", + "engine": { + "Ethash": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "minimumDifficulty": "0x020000", + "difficultyBoundDivisor": "0x0800", + "durationLimit": "0x0d", + "blockReward": "0x4563918244F40000", + "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", + "homesteadTransition": "0x0", + "eip150Transition": "0x0", + "eip155Transition": "0x7fffffffffffffff", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "maxCodeSize": 24576 + } + } + }, + "params": { + "accountStartNonce": "0x00", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x1", + "eip98Transition": "0x7fffffffffffffff", + "eip86Transition": "0x0", + "eip140Transition": "0x0" + }, + "genesis": { + "seal": { + "ethereum": { 
+ "nonce": "0x0000000000000042", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x400000000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", + "gasLimit": "0x1388" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } } + } +} diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index ef191fdc6..4e8b9be3f 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit ef191fdc61cf76cdb9cdc147465fb447304b0ed2 +Subproject commit 4e8b9be3fba16ec32e0cdf50b8f9329826283aaa diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index af0582a36..475496c2b 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -75,6 +75,9 @@ pub fn new_transition_test() -> Spec { load(include_bytes!("../../res/ethereum/t /// Create a new Foundation Mainnet chain spec without genesis accounts. pub fn new_mainnet_like() -> Spec { load(include_bytes!("../../res/ethereum/frontier_like_test.json")) } +/// Create a new Foundation Metropolis era spec. +pub fn new_metropolis_test() -> Spec { load(include_bytes!("../../res/ethereum/metropolis_test.json")) } + /// Create a new Foundation Ropsten chain spec. pub fn new_ropsten() -> Spec { load(include_bytes!("../../res/ethereum/ropsten.json")) } diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs index 265b83559..e03ded071 100644 --- a/ethcore/src/evm/evm.rs +++ b/ethcore/src/evm/evm.rs @@ -104,8 +104,25 @@ pub type Result = ::std::result::Result; pub enum GasLeft<'a> { /// Known gas left Known(U256), - /// Return instruction must be processed. - NeedsReturn(U256, &'a [u8]), + /// Return or Revert instruction must be processed. + NeedsReturn { + /// Amount of gas left. + gas_left: U256, + /// Return data buffer. + data: &'a [u8], + /// Apply or revert state changes on revert. + apply_state: bool + }, +} + +/// Finalization result. Gas Left: either it is a known value, or it needs to be computed by processing +/// a return instruction. +#[derive(Debug)] +pub struct FinalizationResult { + /// Final amount of gas left. + pub gas_left: U256, + /// Apply execution state changes or revert them. + pub apply_state: bool, } /// Types that can be "finalized" using an EVM. @@ -113,15 +130,18 @@ pub enum GasLeft<'a> { /// In practice, this is just used to define an inherent impl on /// `Reult>`. pub trait Finalize { - /// Consume the externalities, call return if necessary, and produce a final amount of gas left. - fn finalize(self, ext: E) -> Result; + /// Consume the externalities, call return if necessary, and produce call result. 
+ fn finalize(self, ext: E) -> Result; } impl<'a> Finalize for Result> { - fn finalize(self, ext: E) -> Result { + fn finalize(self, ext: E) -> Result { match self { - Ok(GasLeft::Known(gas)) => Ok(gas), - Ok(GasLeft::NeedsReturn(gas, ret_code)) => ext.ret(&gas, ret_code), + Ok(GasLeft::Known(gas_left)) => Ok(FinalizationResult { gas_left: gas_left, apply_state: true }), + Ok(GasLeft::NeedsReturn {gas_left, data, apply_state}) => ext.ret(&gas_left, data).map(|gas_left| FinalizationResult { + gas_left: gas_left, + apply_state: apply_state, + }), Err(err) => Err(err), } } diff --git a/ethcore/src/evm/instructions.rs b/ethcore/src/evm/instructions.rs index eef1a9e3b..336a3dcf1 100644 --- a/ethcore/src/evm/instructions.rs +++ b/ethcore/src/evm/instructions.rs @@ -279,6 +279,7 @@ lazy_static! { arr[DELEGATECALL as usize] = InstructionInfo::new("DELEGATECALL", 0, 6, 1, true, GasPriceTier::Special); arr[SUICIDE as usize] = InstructionInfo::new("SUICIDE", 0, 1, 0, true, GasPriceTier::Special); arr[CREATE2 as usize] = InstructionInfo::new("CREATE2", 0, 3, 1, true, GasPriceTier::Special); + arr[REVERT as usize] = InstructionInfo::new("REVERT", 0, 2, 0, true, GasPriceTier::Zero); arr }; } @@ -556,6 +557,8 @@ pub const RETURN: Instruction = 0xf3; pub const DELEGATECALL: Instruction = 0xf4; /// create a new account and set creation address to sha3(sender + sha3(init code)) % 2**160 pub const CREATE2: Instruction = 0xfb; +/// stop execution and revert state changes. Return output data. +pub const REVERT: Instruction = 0xfd; /// halt execution and register account for later deletion pub const SUICIDE: Instruction = 0xff; diff --git a/ethcore/src/evm/interpreter/gasometer.rs b/ethcore/src/evm/interpreter/gasometer.rs index 246c93bad..8f2309fe2 100644 --- a/ethcore/src/evm/interpreter/gasometer.rs +++ b/ethcore/src/evm/interpreter/gasometer.rs @@ -164,7 +164,7 @@ impl Gasometer { instructions::MSTORE8 => { Request::GasMem(default_gas, mem_needed_const(stack.peek(0), 1)?) }, - instructions::RETURN => { + instructions::RETURN | instructions::REVERT => { Request::GasMem(default_gas, mem_needed(stack.peek(0), stack.peek(1))?) }, instructions::SHA3 => { diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index f08737d24..c0a151f49 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -84,8 +84,16 @@ enum InstructionResult { Ok, UnusedGas(Gas), JumpToPosition(U256), - // gas left, init_orf, init_size - StopExecutionNeedsReturn(Gas, U256, U256), + StopExecutionNeedsReturn { + /// Gas left. + gas: Gas, + /// Return data offset. + init_off: U256, + /// Return data size. + init_size: U256, + /// Apply or revert state changes. 
+ apply: bool, + }, StopExecution, } @@ -156,9 +164,13 @@ impl evm::Evm for Interpreter { let pos = self.verify_jump(position, &valid_jump_destinations)?; reader.position = pos; }, - InstructionResult::StopExecutionNeedsReturn(gas, off, size) => { + InstructionResult::StopExecutionNeedsReturn {gas, init_off, init_size, apply} => { informant.done(); - return Ok(GasLeft::NeedsReturn(gas.as_u256(), self.mem.read_slice(off, size))); + return Ok(GasLeft::NeedsReturn { + gas_left: gas.as_u256(), + data: self.mem.read_slice(init_off, init_size), + apply_state: apply + }); }, InstructionResult::StopExecution => break, _ => {}, @@ -183,7 +195,8 @@ impl Interpreter { let schedule = ext.schedule(); if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) || - (instruction == instructions::CREATE2 && !schedule.have_create2) { + (instruction == instructions::CREATE2 && !schedule.have_create2) || + (instruction == instructions::REVERT && !schedule.have_revert) { return Err(evm::Error::BadInstruction { instruction: instruction @@ -363,7 +376,13 @@ impl Interpreter { let init_off = stack.pop_back(); let init_size = stack.pop_back(); - return Ok(InstructionResult::StopExecutionNeedsReturn(gas, init_off, init_size)) + return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: true}) + }, + instructions::REVERT => { + let init_off = stack.pop_back(); + let init_size = stack.pop_back(); + + return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: false}) }, instructions::STOP => { return Ok(InstructionResult::StopExecution); diff --git a/ethcore/src/evm/mod.rs b/ethcore/src/evm/mod.rs index 7906b81ff..8693a3467 100644 --- a/ethcore/src/evm/mod.rs +++ b/ethcore/src/evm/mod.rs @@ -31,7 +31,7 @@ mod tests; #[cfg(all(feature="benches", test))] mod benches; -pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType}; +pub use self::evm::{Evm, Error, Finalize, FinalizationResult, GasLeft, Result, CostType}; pub use self::ext::{Ext, ContractCreateResult, MessageCallResult, CreateContractAddress}; pub use self::factory::{Factory, VMType}; pub use self::schedule::Schedule; diff --git a/ethcore/src/evm/schedule.rs b/ethcore/src/evm/schedule.rs index 3e01f3925..2a3d0f70a 100644 --- a/ethcore/src/evm/schedule.rs +++ b/ethcore/src/evm/schedule.rs @@ -24,6 +24,8 @@ pub struct Schedule { pub have_delegate_call: bool, /// Does it have a CREATE_P2SH instruction pub have_create2: bool, + /// Does it have a REVERT instruction + pub have_revert: bool, /// VM stack limit pub stack_limit: usize, /// Max number of nested calls/creates @@ -120,6 +122,7 @@ impl Schedule { exceptional_failed_code_deposit: true, have_delegate_call: true, have_create2: have_metropolis_instructions, + have_revert: have_metropolis_instructions, stack_limit: 1024, max_depth: 1024, tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], @@ -171,6 +174,7 @@ impl Schedule { exceptional_failed_code_deposit: efcd, have_delegate_call: hdc, have_create2: false, + have_revert: false, stack_limit: 1024, max_depth: 1024, tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index b5b2341aa..21a0fc378 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -64,7 +64,7 @@ pub struct FakeExt { fn test_finalize(res: Result) -> Result { match res { Ok(GasLeft::Known(gas)) => Ok(gas), - Ok(GasLeft::NeedsReturn(_, _)) => unimplemented!(), // since ret is unimplemented. 
+ Ok(GasLeft::NeedsReturn{..}) => unimplemented!(), // since ret is unimplemented. Err(e) => Err(e), } } diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index bfba4ab3d..33f7f59d5 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -22,7 +22,7 @@ use engines::Engine; use types::executed::CallType; use env_info::EnvInfo; use error::ExecutionError; -use evm::{self, Ext, Factory, Finalize, CreateContractAddress}; +use evm::{self, Ext, Factory, Finalize, CreateContractAddress, FinalizationResult}; use externalities::*; use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer}; use transaction::{Action, SignedTransaction}; @@ -246,7 +246,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { output_policy: OutputPolicy, tracer: &mut T, vm_tracer: &mut V - ) -> evm::Result where T: Tracer, V: VMTracer { + ) -> evm::Result where T: Tracer, V: VMTracer { let depth_threshold = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get() / STACK_SIZE_PER_DEPTH); @@ -366,9 +366,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let traces = subtracer.traces(); match res { - Ok(ref gas_left) => tracer.trace_call( + Ok(ref res) => tracer.trace_call( trace_info, - gas - *gas_left, + gas - res.gas_left, trace_output, traces ), @@ -379,7 +379,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { self.enact_result(&res, substate, unconfirmed_substate); trace!(target: "executive", "enacted: substate={:?}\n", substate); - res + res.map(|r| r.gas_left) } else { // otherwise it's just a basic transaction, only do tracing, if necessary. self.state.discard_checkpoint(); @@ -438,9 +438,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { vm_tracer.done_subtrace(subvmtracer); match res { - Ok(ref gas_left) => tracer.trace_create( + Ok(ref res) => tracer.trace_create( trace_info, - gas - *gas_left, + gas - res.gas_left, trace_output, created, subtracer.traces() @@ -449,7 +449,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { }; self.enact_result(&res, substate, unconfirmed_substate); - res + res.map(|r| r.gas_left) } /// Finalizes the transaction (does refunds and suicides). @@ -536,14 +536,15 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } } - fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate) { + fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate) { match *result { Err(evm::Error::OutOfGas) | Err(evm::Error::BadJumpDestination {..}) | Err(evm::Error::BadInstruction {.. }) | Err(evm::Error::StackUnderflow {..}) | Err(evm::Error::BuiltIn {..}) - | Err(evm::Error::OutOfStack {..}) => { + | Err(evm::Error::OutOfStack {..}) + | Ok(FinalizationResult { apply_state: false, .. 
}) => { self.state.revert_to_checkpoint(); }, Ok(_) | Err(evm::Error::Internal(_)) => { @@ -1242,11 +1243,43 @@ mod tests { }; match result { - Err(_) => { - }, - _ => { - panic!("Expected OutOfGas"); - } + Err(_) => {}, + _ => panic!("Expected OutOfGas"), } } + + evm_test!{test_revert: test_revert_jit, test_revert_int} + fn test_revert(factory: Factory) { + let contract_address = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + // EIP-140 test case + let code = "6c726576657274656420646174616000557f726576657274206d657373616765000000000000000000000000000000000000600052600e6000fd".from_hex().unwrap(); + let returns = "726576657274206d657373616765".from_hex().unwrap(); + let mut state = get_temp_state(); + state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), CleanupMode::NoEmpty).unwrap(); + state.commit().unwrap(); + + let mut params = ActionParams::default(); + params.address = contract_address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(20025); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::zero()); + let mut state = get_temp_state(); + state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), CleanupMode::NoEmpty).unwrap(); + let info = EnvInfo::default(); + let engine = TestEngine::new_metropolis(); + let mut substate = Substate::new(); + + let mut output = [0u8; 14]; + let result = { + let mut ex = Executive::new(&mut state, &info, &engine, &factory); + ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap() + }; + + assert_eq!(result, U256::from(1)); + assert_eq!(output[..], returns[..]); + assert_eq!(state.storage_at(&contract_address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(0))); + } } diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index c34ad69e3..37c7ebf50 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -254,9 +254,9 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { match res { Err(_) => fail_unless(out_of_gas, "didn't expect to run out of gas."), - Ok(gas_left) => { + Ok(res) => { fail_unless(!out_of_gas, "expected to run out of gas."); - fail_unless(Some(gas_left) == vm.gas_left.map(Into::into), "gas_left is incorrect"); + fail_unless(Some(res.gas_left) == vm.gas_left.map(Into::into), "gas_left is incorrect"); let vm_output: Option> = vm.output.map(Into::into); fail_unless(Some(output) == vm_output, "output is incorrect"); diff --git a/ethcore/src/json_tests/state.rs b/ethcore/src/json_tests/state.rs index c15847896..d0d5e9746 100644 --- a/ethcore/src/json_tests/state.rs +++ b/ethcore/src/json_tests/state.rs @@ -29,6 +29,7 @@ lazy_static! 
{ pub static ref HOMESTEAD: Spec = ethereum::new_homestead_test(); pub static ref EIP150: Spec = ethereum::new_eip150_test(); pub static ref EIP161: Spec = ethereum::new_eip161_test(); + pub static ref _METROPOLIS: Spec = ethereum::new_metropolis_test(); } pub fn json_chain_test(json_data: &[u8]) -> Vec { @@ -92,7 +93,6 @@ mod state_tests { } declare_test!{GeneralStateTest_stAttackTest, "GeneralStateTests/stAttackTest/"} - declare_test!{GeneralStateTest_stBlockHashTest, "GeneralStateTests/stBlockHashTest/"} declare_test!{GeneralStateTest_stBoundsTest, "GeneralStateTests/stBoundsTest/"} declare_test!{GeneralStateTest_stCallCodes, "GeneralStateTests/stCallCodes/"} declare_test!{skip => [ "createJS_ExampleContract" ], GeneralStateTest_stCallCreateCallCodeTest, "GeneralStateTests/stCallCreateCallCodeTest/"} diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index efbfc435d..74805b233 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -61,6 +61,8 @@ pub struct CommonParams { pub validate_receipts_transition: u64, /// Number of first block where EIP-86 (Metropolis) rules begin. pub eip86_transition: BlockNumber, + /// Number of first block where EIP-140 (Metropolis: REVERT opcode) rules begin. + pub eip140_transition: BlockNumber, } impl From for CommonParams { @@ -76,6 +78,7 @@ impl From for CommonParams { eip98_transition: p.eip98_transition.map_or(0, Into::into), validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into), eip86_transition: p.eip86_transition.map_or(BlockNumber::max_value(), Into::into), + eip140_transition: p.eip140_transition.map_or(BlockNumber::max_value(), Into::into), } } } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 548187e48..57b0adbea 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -56,6 +56,13 @@ impl TestEngine { max_depth: max_depth, } } + + pub fn new_metropolis() -> TestEngine { + TestEngine { + engine: ethereum::new_metropolis_test().engine, + max_depth: 0, + } + } } impl Engine for TestEngine { @@ -72,7 +79,7 @@ impl Engine for TestEngine { } fn schedule(&self, _block_number: u64) -> Schedule { - let mut schedule = Schedule::new_frontier(); + let mut schedule = self.engine.schedule(0); schedule.max_depth = self.max_depth; schedule } diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 6fbe5a280..02513eebd 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -75,12 +75,12 @@ pub fn run_vm(params: ActionParams) -> Result { let mut ext = ext::FakeExt::default(); let start = Instant::now(); - let gas_left = vm.exec(params, &mut ext).finalize(ext); + let res = vm.exec(params, &mut ext).finalize(ext); let duration = start.elapsed(); - match gas_left { - Ok(gas_left) => Ok(Success { - gas_used: initial_gas - gas_left, + match res { + Ok(res) => Ok(Success { + gas_used: initial_gas - res.gas_left, // TODO [ToDr] get output from ext output: Vec::new(), time: duration, diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index 31b5cf68a..824466ffc 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -59,6 +59,9 @@ pub struct Params { /// See `CommonParams` docs. #[serde(rename="eip86Transition")] pub eip86_transition: Option, + /// See `CommonParams` docs. 
+ #[serde(rename="eip140Transition")] + pub eip140_transition: Option, } #[cfg(test)] From 00cdc523235298f1f00ef6d4a4bdf31f917b8c8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 May 2017 18:05:17 +0200 Subject: [PATCH 25/29] Parity-PubSub as a separate API. (#5676) * Parity-PubSub as a separate API. * Fix tests. * Fixes after merge. --- ethcore/native_contracts/Cargo.toml | 2 +- parity/cli/mod.rs | 6 +-- parity/dapps.rs | 4 +- parity/rpc_apis.rs | 68 +++++++++++++++++++++-------- 4 files changed, 56 insertions(+), 24 deletions(-) diff --git a/ethcore/native_contracts/Cargo.toml b/ethcore/native_contracts/Cargo.toml index c3ef59d31..6b225220f 100644 --- a/ethcore/native_contracts/Cargo.toml +++ b/ethcore/native_contracts/Cargo.toml @@ -16,4 +16,4 @@ native-contract-generator = { path = "generator" } [features] default = [] -test_contracts = [] \ No newline at end of file +test_contracts = [] diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 45285fa4a..b5afb8d0a 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -172,7 +172,7 @@ usage! { or |c: &Config| otry!(c.rpc).interface.clone(), flag_jsonrpc_cors: Option = None, or |c: &Config| otry!(c.rpc).cors.clone().map(Some), - flag_jsonrpc_apis: String = "web3,eth,pubsub,net,parity,traces,rpc,secretstore", + flag_jsonrpc_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore", or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), flag_jsonrpc_hosts: String = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), @@ -186,7 +186,7 @@ usage! { or |c: &Config| otry!(c.websockets).port.clone(), flag_ws_interface: String = "local", or |c: &Config| otry!(c.websockets).interface.clone(), - flag_ws_apis: String = "web3,eth,pubsub,net,parity,traces,rpc,secretstore", + flag_ws_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore", or |c: &Config| otry!(c.websockets).apis.as_ref().map(|vec| vec.join(",")), flag_ws_origins: String = "none", or |c: &Config| otry!(c.websockets).origins.as_ref().map(|vec| vec.join(",")), @@ -198,7 +198,7 @@ usage! 
{ or |c: &Config| otry!(c.ipc).disable.clone(), flag_ipc_path: String = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| otry!(c.ipc).path.clone(), - flag_ipc_apis: String = "web3,eth,pubsub,net,parity,parity_accounts,traces,rpc,secretstore", + flag_ipc_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,parity_accounts,traces,rpc,secretstore", or |c: &Config| otry!(c.ipc).apis.as_ref().map(|vec| vec.join(",")), // DAPPS diff --git a/parity/dapps.rs b/parity/dapps.rs index ff0dd2139..324e40403 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -104,11 +104,11 @@ impl ContractClient for LightRegistrar { value: 0.into(), data: data, }.fake_sign(Address::default()), - header: on_demand::request::HeaderRef::Stored(header), + header: header.into(), env_info: env_info, engine: self.client.engine().clone(), }) - .expect("todo: handle error") + .expect("No back-references; therefore all back-refs valid; qed") .then(|res| match res { Ok(Ok(executed)) => Ok(executed.output), Ok(Err(e)) => Err(format!("Failed to execute transaction: {}", e)), diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index d738d1eee..d456a0fff 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -54,6 +54,8 @@ pub enum Api { Signer, /// Parity - Custom extensions (Safe) Parity, + /// Parity PubSub - Generic Publish-Subscriber (Safety depends on other APIs exposed). + ParityPubSub, /// Parity Accounts extensions (UNSAFE: Passwords, Side Effects (new account)) ParityAccounts, /// Parity - Set methods (UNSAFE: Side Effects affecting node operation) @@ -80,6 +82,7 @@ impl FromStr for Api { "personal" => Ok(Personal), "signer" => Ok(Signer), "parity" => Ok(Parity), + "parity_pubsub" => Ok(ParityPubSub), "parity_accounts" => Ok(ParityAccounts), "parity_set" => Ok(ParitySet), "traces" => Ok(Traces), @@ -161,6 +164,7 @@ fn to_modules(apis: &[Api]) -> BTreeMap { Api::Signer => ("signer", "1.0"), Api::Parity => ("parity", "1.0"), Api::ParityAccounts => ("parity_accounts", "1.0"), + Api::ParityPubSub => ("parity_pubsub", "1.0"), Api::ParitySet => ("parity_set", "1.0"), Api::Traces => ("traces", "1.0"), Api::Rpc => ("rpc", "1.0"), @@ -288,13 +292,15 @@ impl FullDependencies { self.dapps_port, ).to_delegate()); + if !for_generic_pubsub { + add_signing_methods!(ParitySigning, handler, self); + } + }, + Api::ParityPubSub => { if !for_generic_pubsub { let mut rpc = MetaIoHandler::default(); self.extend_api(&mut rpc, apis, true); handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate()); - - add_signing_methods!(EthSigning, handler, self); - add_signing_methods!(ParitySigning, handler, self); } }, Api::ParityAccounts => { @@ -364,11 +370,13 @@ pub struct LightDependencies { pub remote: parity_reactor::Remote, } -impl Dependencies for LightDependencies { - type Notifier = LightClientNotifier; - - fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } - fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { +impl LightDependencies { + fn extend_api>( + &self, + handler: &mut MetaIoHandler, + apis: &[Api], + for_generic_pubsub: bool, + ) { use parity_rpc::v1::*; let dispatcher = LightDispatcher::new( @@ -416,8 +424,11 @@ impl Dependencies for LightDependencies { self.cache.clone(), ); handler.extend_with(Eth::to_delegate(client.clone())); - handler.extend_with(EthFilter::to_delegate(client)); - add_signing_methods!(EthSigning, handler, self); + + if !for_generic_pubsub { + handler.extend_with(EthFilter::to_delegate(client)); + 
add_signing_methods!(EthSigning, handler, self); + } }, Api::EthPubSub => { let client = EthPubSubClient::new(self.client.clone(), self.remote.clone()); @@ -450,8 +461,16 @@ impl Dependencies for LightDependencies { self.dapps_port, ).to_delegate()); - add_signing_methods!(EthSigning, handler, self); - add_signing_methods!(ParitySigning, handler, self); + if !for_generic_pubsub { + add_signing_methods!(ParitySigning, handler, self); + } + }, + Api::ParityPubSub => { + if !for_generic_pubsub { + let mut rpc = MetaIoHandler::default(); + self.extend_api(&mut rpc, apis, true); + handler.extend_with(PubSubClient::new(rpc, self.remote.clone()).to_delegate()); + } }, Api::ParityAccounts => { let secret_store = Some(self.secret_store.clone()); @@ -479,6 +498,15 @@ impl Dependencies for LightDependencies { } } +impl Dependencies for LightDependencies { + type Notifier = LightClientNotifier; + + fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + self.extend_api(handler, apis, false) + } +} + impl ApiSet { /// Retains only APIs in given set. pub fn retain(self, set: Self) -> Self { @@ -494,15 +522,18 @@ impl ApiSet { ApiSet::PublicContext => public_list, ApiSet::UnsafeContext => { public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); public_list }, ApiSet::IpcContext => { public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); public_list.insert(Api::ParityAccounts); public_list }, ApiSet::SafeContext => { public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); public_list.insert(Api::ParityAccounts); public_list.insert(Api::ParitySet); public_list.insert(Api::Signer); @@ -510,6 +541,7 @@ impl ApiSet { }, ApiSet::All => { public_list.insert(Api::Traces); + public_list.insert(Api::ParityPubSub); public_list.insert(Api::ParityAccounts); public_list.insert(Api::ParitySet); public_list.insert(Api::Signer); @@ -564,7 +596,7 @@ mod test { fn test_api_set_unsafe_context() { let expected = vec![ // make sure this list contains only SAFE methods - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore ].into_iter().collect(); assert_eq!(ApiSet::UnsafeContext.list_apis(), expected); } @@ -573,7 +605,7 @@ mod test { fn test_api_set_ipc_context() { let expected = vec![ // safe - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore, // semi-safe Api::ParityAccounts ].into_iter().collect(); @@ -584,7 +616,7 @@ mod test { fn test_api_set_safe_context() { let expected = vec![ // safe - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore, // semi-safe Api::ParityAccounts, // Unsafe @@ -596,7 +628,7 @@ mod test { #[test] fn test_all_apis() { assert_eq!("all".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore, Api::ParityAccounts, Api::ParitySet, Api::Signer, 
Api::Personal @@ -606,7 +638,7 @@ mod test { #[test] fn test_all_without_personal_apis() { assert_eq!("personal,all,-personal".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore, Api::ParityAccounts, Api::ParitySet, Api::Signer, ].into_iter().collect())); @@ -615,7 +647,7 @@ mod test { #[test] fn test_safe_parsing() { assert_eq!("safe".parse::().unwrap(), ApiSet::List(vec![ - Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::EthPubSub, Api::Parity, Api::Traces, Api::Rpc, Api::SecretStore, + Api::Web3, Api::Net, Api::Eth, Api::EthPubSub, Api::Parity, Api::ParityPubSub, Api::Traces, Api::Rpc, Api::SecretStore, ].into_iter().collect())); } } From 7499efecf61bb9475d9c2187bbfe53d9d37421d1 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 24 May 2017 12:53:02 +0300 Subject: [PATCH 26/29] fix from/into electum bug (#5686) --- ethkey/src/signature.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethkey/src/signature.rs b/ethkey/src/signature.rs index bb73581e6..91fe3e2a3 100644 --- a/ethkey/src/signature.rs +++ b/ethkey/src/signature.rs @@ -54,7 +54,7 @@ impl Signature { /// Parse bytes as a signature encoded as RSV (V in "Electrum" notation). /// May return empty (invalid) signature if given data has invalid length. pub fn from_electrum(data: &[u8]) -> Self { - if data.len() != 65 || data[0] < 27 { + if data.len() != 65 || data[64] < 27 { // fallback to empty (invalid) signature return Signature::default(); } From cbcc369a2de2880d20650911535f130ad0e47dc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 24 May 2017 12:24:07 +0200 Subject: [PATCH 27/29] UI server refactoring (#5580) * Full API in Authenticated WS server. * Replacing UI server with Hyper. * Solving CLI, RPCs and tests. * Porting signer tests. * Fixing origin recognition for dapps/rpc. * Fixing tests. Adding parity-rpc-client to test. * Dapps exposed as RPC method. * JS code to support new connection scheme. * Fixing dapps tests. * Updating allowed origins/hosts to support web3.site. * Fixing tests, fixing UI. * Fixing tests. * Removing invalid tests. * Fixing merge. * 404 fallback for UI * Improve ContentFetcher constructor readability. * Naming. * Update .gitlab-ci.yml fix CI lint error * Fixing tests and linting issues. * Fixing new tests. * UI hosts. * Submodules fix. 
--- .gitlab-ci.yml | 2 +- Cargo.lock | 49 +- Cargo.toml | 5 +- dapps/src/api/api.rs | 50 +- dapps/src/api/mod.rs | 1 - dapps/src/api/response.rs | 6 - dapps/src/api/types.rs | 40 -- dapps/src/apps/app.rs | 55 ++ dapps/src/apps/fetcher/mod.rs | 37 +- dapps/src/apps/fs.rs | 7 +- dapps/src/apps/manifest.rs | 2 +- dapps/src/apps/mod.rs | 35 +- dapps/src/apps/ui.rs | 55 ++ dapps/src/endpoint.rs | 3 +- dapps/src/lib.rs | 139 +++-- dapps/src/page/builtin.rs | 33 +- dapps/src/proxypac.rs | 14 +- dapps/src/router.rs | 114 ++-- dapps/src/rpc.rs | 97 ---- dapps/src/tests/api.rs | 112 ---- dapps/src/tests/helpers/mod.rs | 53 +- dapps/src/tests/redirection.rs | 12 +- js/src/api/rpc/parity/parity.js | 20 +- js/src/dapps/console/parity.js | 540 +++++++++--------- js/src/dapps/registry/ui/image.js | 4 +- js/src/dapps/tokenreg/Tokens/Token/token.js | 3 +- js/src/embed.js | 3 + js/src/environment/index.js | 12 +- js/src/index.js | 5 +- js/src/jsonrpc/interfaces/parity.js | 52 +- js/src/secureApi.js | 81 ++- js/src/util/dapps.js | 24 +- js/src/views/Dapps/dappStore.spec.js | 26 +- js/src/views/Web/store.spec.js | 8 +- js/webpack/build.server.js | 7 +- js/webpack/dev.server.js | 6 - js/webpack/shared.js | 10 +- parity/cli/mod.rs | 7 +- parity/cli/usage.txt | 5 + parity/configuration.rs | 160 +++--- parity/dapps.rs | 97 +++- parity/main.rs | 5 +- parity/rpc.rs | 267 +++++---- parity/rpc_apis.rs | 59 +- parity/run.rs | 173 +++--- parity/signer.rs | 125 +--- rpc/Cargo.toml | 8 +- rpc/rpctest/Cargo.toml | 17 - rpc/rpctest/src/main.rs | 148 ----- .../authcode_store.rs => rpc/src/authcodes.rs | 7 +- rpc/src/{metadata.rs => http_common.rs} | 29 +- rpc/src/lib.rs | 57 +- rpc/src/tests/helpers.rs | 84 +++ signer/build.rs => rpc/src/tests/mod.rs | 12 +- rpc/src/tests/rpc.rs | 172 ++++++ .../src/tests/mod.rs => rpc/src/tests/ws.rs | 175 +----- rpc/src/v1/extractors.rs | 263 +++++++++ rpc/src/v1/helpers/dapps.rs | 33 ++ rpc/src/v1/helpers/errors.rs | 8 + rpc/src/v1/helpers/mod.rs | 6 +- rpc/src/v1/helpers/signer.rs | 17 +- rpc/src/v1/impls/light/parity.rs | 32 +- rpc/src/v1/impls/light/parity_set.rs | 11 +- rpc/src/v1/impls/parity.rs | 32 +- rpc/src/v1/impls/parity_set.rs | 18 +- rpc/src/v1/{helpers => }/informant.rs | 0 rpc/src/v1/metadata.rs | 5 +- rpc/src/v1/mod.rs | 24 +- rpc/src/v1/tests/helpers/dapps.rs | 37 ++ rpc/src/v1/tests/helpers/mod.rs | 12 +- rpc/src/v1/tests/mocked/parity.rs | 53 +- rpc/src/v1/tests/mocked/parity_set.rs | 27 +- rpc/src/v1/tests/mocked/signer.rs | 2 +- rpc/src/v1/tests/mocked/signing.rs | 2 +- rpc/src/v1/traits/parity.rs | 16 +- rpc/src/v1/traits/parity_set.rs | 6 +- rpc/src/v1/types/confirmations.rs | 7 +- rpc/src/v1/types/dapps.rs | 57 ++ rpc/src/v1/types/mod.rs | 2 + rpc/src/v1/types/provenance.rs | 38 +- rpc_cli/src/lib.rs | 2 +- rpc_client/Cargo.toml | 4 +- rpc_client/src/client.rs | 4 +- rpc_client/src/lib.rs | 18 +- rpc_client/src/signer_client.rs | 16 +- scripts/targets.sh | 3 +- signer/Cargo.toml | 32 -- signer/src/lib.rs | 73 --- signer/src/ws_server/error_tpl.html | 21 - signer/src/ws_server/mod.rs | 219 ------- signer/src/ws_server/session.rs | 333 ----------- 91 files changed, 2171 insertions(+), 2591 deletions(-) create mode 100644 dapps/src/apps/app.rs create mode 100644 dapps/src/apps/ui.rs delete mode 100644 dapps/src/rpc.rs delete mode 100644 rpc/rpctest/Cargo.toml delete mode 100644 rpc/rpctest/src/main.rs rename signer/src/authcode_store.rs => rpc/src/authcodes.rs (99%) rename rpc/src/{metadata.rs => http_common.rs} (64%) create mode 100644 rpc/src/tests/helpers.rs 
rename signer/build.rs => rpc/src/tests/mod.rs (79%) create mode 100644 rpc/src/tests/rpc.rs rename signer/src/tests/mod.rs => rpc/src/tests/ws.rs (53%) create mode 100644 rpc/src/v1/extractors.rs create mode 100644 rpc/src/v1/helpers/dapps.rs rename rpc/src/v1/{helpers => }/informant.rs (100%) create mode 100644 rpc/src/v1/tests/helpers/dapps.rs create mode 100644 rpc/src/v1/types/dapps.rs delete mode 100644 signer/Cargo.toml delete mode 100644 signer/src/lib.rs delete mode 100644 signer/src/ws_server/error_tpl.html delete mode 100644 signer/src/ws_server/mod.rs delete mode 100644 signer/src/ws_server/session.rs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bc9d9257d..c8f5f4d77 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -564,7 +564,7 @@ test-windows: - git submodule update --init --recursive script: - set RUST_BACKTRACE=1 - - echo cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p ethcore-dapps -p parity-rpc -p ethcore-signer -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity %CARGOFLAGS% --verbose --release + - echo cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p parity-dapps -p parity-rpc -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity-rpc-client -p parity %CARGOFLAGS% --verbose --release tags: - rust-windows allow_failure: true diff --git a/Cargo.lock b/Cargo.lock index 05a44cd6b..b63349f77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -609,26 +609,6 @@ dependencies = [ "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ethcore-signer" -version = "1.7.0" -dependencies = [ - "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.7.0", - "ethcore-io 1.7.0", - "ethcore-util 1.7.0", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-rpc 1.7.0", - "parity-ui 1.7.0", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7)", -] - [[package]] name = "ethcore-stratum" version = "1.7.0" @@ -1608,7 +1588,6 @@ dependencies = [ "ethcore-light 1.7.0", "ethcore-logger 1.7.0", "ethcore-secretstore 1.0.0", - "ethcore-signer 1.7.0", "ethcore-stratum 1.7.0", "ethcore-util 1.7.0", "ethkey 0.2.0", @@ -1797,18 +1776,18 @@ dependencies = [ name = "parity-rpc-client" version = "1.4.0" dependencies = [ - "ethcore-signer 1.7.0", "ethcore-util 1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.7.0", "rand 0.3.14 
(registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7)", ] [[package]] @@ -2385,11 +2364,6 @@ name = "siphasher" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "slab" -version = "0.2.0" -source = "git+https://github.com/carllerche/slab?rev=5476efcafb#5476efcafbc5ef4d7315b1bea3f756d8a1fe975e" - [[package]] name = "slab" version = "0.2.0" @@ -2823,25 +2797,10 @@ name = "winapi-build" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "ws" -version = "0.5.3" -source = "git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7#30415c17f1bec53b2dcabae5b8b887df75dcbe34" -dependencies = [ - "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (git+https://github.com/paritytech/mio)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "ws" version = "0.6.0" -source = "git+https://github.com/tomusdrw/ws-rs#3259e7ca906c848beae109eb32e492871f8f397d" +source = "git+https://github.com/tomusdrw/ws-rs#7f8e416b7f048880228005457e117128be38bf0f" dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3082,7 +3041,6 @@ dependencies = [ "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" "checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d" "checksum siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c44e42fa187b5a8782489cf7740cc27c3125806be2bf33563cf5e02e9533fcd" -"checksum slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)" = "" "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410" @@ -3135,7 +3093,6 @@ dependencies = [ "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum ws 
0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7)" = "" "checksum ws 0.6.0 (git+https://github.com/tomusdrw/ws-rs)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453" diff --git a/Cargo.toml b/Cargo.toml index c0eccff28..597766a49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,6 @@ ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethcore-io = { path = "util/io" } ethcore-devtools = { path = "devtools" } -ethcore-signer = { path = "signer" } ethcore-ipc = { path = "ipc/rpc" } ethcore-ipc-nano = { path = "ipc/nano" } ethcore-ipc-hypervisor = { path = "ipc/hypervisor" } @@ -75,17 +74,15 @@ default = ["ui-precompiled"] ui = [ "dapps", "parity-dapps/ui", - "ethcore-signer/ui", ] ui-precompiled = [ "dapps", - "ethcore-signer/ui-precompiled", "parity-dapps/ui-precompiled", ] dapps = ["parity-dapps"] ipc = ["ethcore/ipc", "ethsync/ipc"] jit = ["ethcore/jit"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "parity-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "parity-rpc/dev", "parity-dapps/dev"] json-tests = ["ethcore/json-tests"] test-heavy = ["ethcore/test-heavy"] ethkey-cli = ["ethcore/ethkey-cli"] diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 064ad6d42..d377ebe57 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -16,42 +16,27 @@ use std::sync::Arc; -use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; -use hyper::header; use hyper::method::Method; -use api::types::{App, ApiError}; +use api::types::ApiError; use api::response; use apps::fetcher::Fetcher; use handlers::extract_url; -use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server::{self, AccessControlAllowOrigin}; +use endpoint::{Endpoint, Handler, EndpointPath}; #[derive(Clone)] pub struct RestApi { - // TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic. - // RequestMiddleware should be able to tell that cors headers should be included. 
- cors_domains: Option>, - apps: Vec, fetcher: Arc, } impl RestApi { - pub fn new(cors_domains: Vec, endpoints: &Endpoints, fetcher: Arc) -> Box { + pub fn new(fetcher: Arc) -> Box { Box::new(RestApi { - cors_domains: Some(cors_domains), - apps: Self::list_apps(endpoints), fetcher: fetcher, }) } - - fn list_apps(endpoints: &Endpoints) -> Vec { - endpoints.iter().filter_map(|(ref k, ref e)| { - e.info().map(|ref info| App::from_info(k, info)) - }).collect() - } } impl Endpoint for RestApi { @@ -62,7 +47,6 @@ impl Endpoint for RestApi { struct RestApiRouter { api: RestApi, - cors_header: Option, path: Option, control: Option, handler: Box, @@ -72,7 +56,6 @@ impl RestApiRouter { fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), - cors_header: None, control: Some(control), api: api, handler: response::as_json_error(&ApiError { @@ -92,35 +75,10 @@ impl RestApiRouter { _ => None } } - - /// Returns basic headers for a response (it may be overwritten by the handler) - fn response_headers(cors_header: Option) -> header::Headers { - let mut headers = header::Headers::new(); - - if let Some(cors_header) = cors_header { - headers.set(header::AccessControlAllowCredentials); - headers.set(header::AccessControlAllowMethods(vec![ - Method::Options, - Method::Post, - Method::Get, - ])); - headers.set(header::AccessControlAllowHeaders(vec![ - UniCase("origin".to_owned()), - UniCase("content-type".to_owned()), - UniCase("accept".to_owned()), - ])); - - headers.set(cors_header); - } - - headers - } } impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { - self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); - if let Method::Options = *request.method() { self.handler = response::empty(); return Next::write(); @@ -144,7 +102,6 @@ impl server::Handler for RestApiRouter { if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() } let handler = endpoint.and_then(|v| match v { - "apps" => Some(response::as_json(&self.api.apps)), "ping" => Some(response::ping()), "content" => self.resolve_content(hash, path, control), _ => None @@ -163,7 +120,6 @@ impl server::Handler for RestApiRouter { } fn on_response(&mut self, res: &mut server::Response) -> Next { - *res.headers_mut() = Self::response_headers(self.cors_header.take()); self.handler.on_response(res) } diff --git a/dapps/src/api/mod.rs b/dapps/src/api/mod.rs index f04b18878..4ffb9f791 100644 --- a/dapps/src/api/mod.rs +++ b/dapps/src/api/mod.rs @@ -21,4 +21,3 @@ mod response; mod types; pub use self::api::RestApi; -pub use self::types::App; diff --git a/dapps/src/api/response.rs b/dapps/src/api/response.rs index 380b1f996..2da2d0c14 100644 --- a/dapps/src/api/response.rs +++ b/dapps/src/api/response.rs @@ -23,12 +23,6 @@ pub fn empty() -> Box { Box::new(ContentHandler::ok("".into(), mime!(Text/Plain))) } -pub fn as_json(val: &T) -> Box { - let json = serde_json::to_string(val) - .expect("serialization to string is infallible; qed"); - Box::new(ContentHandler::ok(json, mime!(Application/Json))) -} - pub fn as_json_error(val: &T) -> Box { let json = serde_json::to_string(val) .expect("serialization to string is infallible; qed"); diff --git a/dapps/src/api/types.rs b/dapps/src/api/types.rs index a690a0b2b..549186955 100644 --- a/dapps/src/api/types.rs +++ b/dapps/src/api/types.rs @@ -14,46 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use endpoint::EndpointInfo; - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct App { - pub id: String, - pub name: String, - pub description: String, - pub version: String, - pub author: String, - #[serde(rename="iconUrl")] - pub icon_url: String, -} - -impl App { - /// Creates `App` instance from `EndpointInfo` and `id`. - pub fn from_info(id: &str, info: &EndpointInfo) -> Self { - App { - id: id.to_owned(), - name: info.name.to_owned(), - description: info.description.to_owned(), - version: info.version.to_owned(), - author: info.author.to_owned(), - icon_url: info.icon_url.to_owned(), - } - } -} - -impl Into for App { - fn into(self) -> EndpointInfo { - EndpointInfo { - name: self.name, - description: self.description, - version: self.version, - author: self.author, - icon_url: self.icon_url, - } - } -} - #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ApiError { diff --git a/dapps/src/apps/app.rs b/dapps/src/apps/app.rs new file mode 100644 index 000000000..1d2c9dca6 --- /dev/null +++ b/dapps/src/apps/app.rs @@ -0,0 +1,55 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use endpoint::EndpointInfo; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct App { + pub id: String, + pub name: String, + pub description: String, + pub version: String, + pub author: String, + #[serde(rename="iconUrl")] + pub icon_url: String, +} + +impl App { + /// Creates `App` instance from `EndpointInfo` and `id`. 
+ pub fn from_info(id: &str, info: &EndpointInfo) -> Self { + App { + id: id.to_owned(), + name: info.name.to_owned(), + description: info.description.to_owned(), + version: info.version.to_owned(), + author: info.author.to_owned(), + icon_url: info.icon_url.to_owned(), + } + } +} + +impl Into for App { + fn into(self) -> EndpointInfo { + EndpointInfo { + name: self.name, + description: self.description, + version: self.version, + author: self.author, + icon_url: self.icon_url, + } + } +} diff --git a/dapps/src/apps/fetcher/mod.rs b/dapps/src/apps/fetcher/mod.rs index ec8004b30..d621042c4 100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -55,6 +55,7 @@ pub struct ContentFetcher, remote: Remote, fetch: F, + only_content: bool, } impl Drop for ContentFetcher { @@ -66,7 +67,12 @@ impl Drop for ContentFetcher { impl ContentFetcher { - pub fn new(resolver: R, sync_status: Arc, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self { + pub fn new( + resolver: R, + sync_status: Arc, + remote: Remote, + fetch: F, + ) -> Self { let mut dapps_path = env::temp_dir(); dapps_path.push(random_filename()); @@ -75,12 +81,23 @@ impl ContentFetcher { resolver: resolver, sync: sync_status, cache: Arc::new(Mutex::new(ContentCache::default())), - embeddable_on: embeddable_on, + embeddable_on: None, remote: remote, fetch: fetch, + only_content: true, } } + pub fn allow_dapps(mut self, dapps: bool) -> Self { + self.only_content = !dapps; + self + } + + pub fn embeddable_on(mut self, embeddable_on: Option<(String, u16)>) -> Self { + self.embeddable_on = embeddable_on; + self + } + fn still_syncing(address: Option<(String, u16)>) -> Box { Box::new(ContentHandler::error( StatusCode::ServiceUnavailable, @@ -91,6 +108,16 @@ impl ContentFetcher { )) } + fn dapps_disabled(address: Option<(String, u16)>) -> Box { + Box::new(ContentHandler::error( + StatusCode::ServiceUnavailable, + "Network Dapps Not Available", + "This interface doesn't support network dapps for security reasons.", + None, + address, + )) + } + #[cfg(test)] fn set_status(&self, content_id: &str, status: ContentStatus) { self.cache.lock().insert(content_id.to_owned(), status); @@ -163,6 +190,9 @@ impl Fetcher for ContentFetcher { Some(URLHintResult::Dapp(_)) if self.sync.is_major_importing() => { (None, Self::still_syncing(self.embeddable_on.clone())) }, + Some(URLHintResult::Dapp(_)) if self.only_content => { + (None, Self::dapps_disabled(self.embeddable_on.clone())) + }, Some(URLHintResult::Dapp(dapp)) => { let handler = ContentFetcherHandler::new( dapp.url(), @@ -254,7 +284,8 @@ mod tests { fn should_true_if_contains_the_app() { // given let path = env::temp_dir(); - let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false), None, Remote::new_sync(), Client::new().unwrap()); + let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false), Remote::new_sync(), Client::new().unwrap()) + .allow_dapps(true); let handler = LocalPageEndpoint::new(path, EndpointInfo { name: "fake".into(), description: "".into(), diff --git a/dapps/src/apps/fs.rs b/dapps/src/apps/fs.rs index d14f52c69..8c5c65202 100644 --- a/dapps/src/apps/fs.rs +++ b/dapps/src/apps/fs.rs @@ -14,12 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::collections::BTreeMap; use std::io; use std::io::Read; use std::fs; use std::path::{Path, PathBuf}; use page::{LocalPageEndpoint, PageCache}; -use endpoint::{Endpoints, EndpointInfo}; +use endpoint::{Endpoint, EndpointInfo}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest}; struct LocalDapp { @@ -85,8 +86,8 @@ fn local_dapp(name: String, path: PathBuf) -> LocalDapp { /// Returns endpoints for Local Dapps found for given filesystem path. /// Scans the directory and collects `LocalPageEndpoints`. -pub fn local_endpoints>(dapps_path: P, signer_address: Option<(String, u16)>) -> Endpoints { - let mut pages = Endpoints::new(); +pub fn local_endpoints>(dapps_path: P, signer_address: Option<(String, u16)>) -> BTreeMap> { + let mut pages = BTreeMap::>::new(); for dapp in local_dapps(dapps_path.as_ref()) { pages.insert( dapp.id, diff --git a/dapps/src/apps/manifest.rs b/dapps/src/apps/manifest.rs index a40cfb8b9..946114980 100644 --- a/dapps/src/apps/manifest.rs +++ b/dapps/src/apps/manifest.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use serde_json; -pub use api::App as Manifest; +pub use apps::App as Manifest; pub const MANIFEST_FILENAME: &'static str = "manifest.json"; diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index b85f0dde9..b3c5a5cef 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::collections::BTreeMap; use std::path::PathBuf; use std::sync::Arc; + use endpoint::{Endpoints, Endpoint}; use page::PageEndpoint; use proxypac::ProxyPac; @@ -23,17 +25,19 @@ use web::Web; use fetch::Fetch; use parity_dapps::WebApp; use parity_reactor::Remote; +use parity_ui; use {WebProxyTokens}; +mod app; mod cache; mod fs; +mod ui; pub mod fetcher; pub mod manifest; -extern crate parity_ui; +pub use self::app::App; -pub const HOME_PAGE: &'static str = "parity"; -pub const DAPPS_DOMAIN: &'static str = ".web3.site"; +pub const HOME_PAGE: &'static str = "home"; pub const RPC_PATH: &'static str = "rpc"; pub const API_PATH: &'static str = "api"; pub const UTILS_PATH: &'static str = "parity-utils"; @@ -44,18 +48,27 @@ pub fn utils() -> Box { Box::new(PageEndpoint::with_prefix(parity_ui::App::default(), UTILS_PATH.to_owned())) } +pub fn ui() -> Box { + Box::new(PageEndpoint::with_fallback_to_index(parity_ui::App::default())) +} + +pub fn ui_redirection(ui_address: Option<(String, u16)>) -> Box { + Box::new(ui::Redirection::new(ui_address)) +} + pub fn all_endpoints( dapps_path: PathBuf, extra_dapps: Vec, - signer_address: Option<(String, u16)>, + dapps_domain: String, + ui_address: Option<(String, u16)>, web_proxy_tokens: Arc, remote: Remote, fetch: F, ) -> Endpoints { // fetch fs dapps at first to avoid overwriting builtins - let mut pages = fs::local_endpoints(dapps_path, signer_address.clone()); + let mut pages = fs::local_endpoints(dapps_path, ui_address.clone()); for path in extra_dapps { - if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), signer_address.clone()) { + if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), ui_address.clone()) { pages.insert(id, endpoint); } else { warn!(target: "dapps", "Ignoring invalid dapp at {}", path.display()); @@ -63,14 +76,14 @@ pub fn all_endpoints( } // NOTE [ToDr] Dapps will be currently embeded on 8180 - insert::(&mut pages, "ui", Embeddable::Yes(signer_address.clone())); - pages.insert("proxy".into(), ProxyPac::boxed(signer_address.clone())); - 
pages.insert(WEB_PATH.into(), Web::boxed(signer_address.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone())); + insert::(&mut pages, "ui", Embeddable::Yes(ui_address.clone())); + pages.insert("proxy".into(), ProxyPac::boxed(ui_address.clone(), dapps_domain)); + pages.insert(WEB_PATH.into(), Web::boxed(ui_address.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone())); - pages + Arc::new(pages) } -fn insert(pages: &mut Endpoints, id: &str, embed_at: Embeddable) { +fn insert(pages: &mut BTreeMap>, id: &str, embed_at: Embeddable) { pages.insert(id.to_owned(), Box::new(match embed_at { Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address), Embeddable::No => PageEndpoint::new(T::default()), diff --git a/dapps/src/apps/ui.rs b/dapps/src/apps/ui.rs new file mode 100644 index 000000000..d5e7bd5e8 --- /dev/null +++ b/dapps/src/apps/ui.rs @@ -0,0 +1,55 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! UI redirections + +use hyper::{Control, StatusCode}; + +use endpoint::{Endpoint, Handler, EndpointPath}; +use {address, handlers}; + +/// Redirection to UI server. +pub struct Redirection { + signer_address: Option<(String, u16)>, +} + +impl Redirection { + pub fn new( + signer_address: Option<(String, u16)>, + ) -> Self { + Redirection { + signer_address: signer_address, + } + } +} + +impl Endpoint for Redirection { + fn to_async_handler(&self, _path: EndpointPath, _control: Control) -> Box { + if let Some(ref signer_address) = self.signer_address { + trace!(target: "dapps", "Redirecting to signer interface."); + handlers::Redirection::boxed(&format!("http://{}", address(signer_address))) + } else { + trace!(target: "dapps", "Signer disabled, returning 404."); + Box::new(handlers::ContentHandler::error( + StatusCode::NotFound, + "404 Not Found", + "Your homepage is not available when Trusted Signer is disabled.", + Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), + self.signer_address.clone(), + )) + } + } +} diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index ea5825b74..ea8fd0a38 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -16,6 +16,7 @@ //! 
URL Endpoint traits +use std::sync::Arc; use std::collections::BTreeMap; use hyper::{self, server, net}; @@ -38,7 +39,7 @@ pub struct EndpointInfo { pub icon_url: String, } -pub type Endpoints = BTreeMap>; +pub type Endpoints = Arc>>; pub type Handler = server::Handler + Send; pub trait Endpoint : Send + Sync { diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 5f4b83325..0860f0c10 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -40,6 +40,7 @@ extern crate fetch; extern crate parity_dapps_glue as parity_dapps; extern crate parity_hash_fetch as hash_fetch; extern crate parity_reactor; +extern crate parity_ui; #[macro_use] extern crate log; @@ -70,7 +71,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::collections::HashMap; -use jsonrpc_http_server::{self as http, hyper, AccessControlAllowOrigin}; +use jsonrpc_http_server::{self as http, hyper}; use fetch::Fetch; use parity_reactor::Remote; @@ -97,18 +98,74 @@ impl WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync { fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) } } +/// Current supported endpoints. +pub struct Endpoints { + endpoints: endpoint::Endpoints, +} + +impl Endpoints { + /// Returns a current list of app endpoints. + pub fn list(&self) -> Vec { + self.endpoints.iter().filter_map(|(ref k, ref e)| { + e.info().map(|ref info| apps::App::from_info(k, info)) + }).collect() + } +} + /// Dapps server as `jsonrpc-http-server` request middleware. pub struct Middleware { router: router::Router, + endpoints: endpoint::Endpoints, } impl Middleware { - /// Creates new Dapps server middleware. - pub fn new( + /// Get local endpoints handle. + pub fn endpoints(&self) -> Endpoints { + Endpoints { + endpoints: self.endpoints.clone(), + } + } + + /// Creates new middleware for UI server. + pub fn ui( remote: Remote, - signer_address: Option<(String, u16)>, + registrar: Arc, + sync_status: Arc, + fetch: F, + dapps_domain: String, + ) -> Self { + let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( + hash_fetch::urlhint::URLHintContract::new(registrar), + sync_status, + remote.clone(), + fetch.clone(), + ).embeddable_on(None).allow_dapps(false)); + let special = { + let mut special = special_endpoints(content_fetcher.clone()); + special.insert(router::SpecialEndpoint::Home, Some(apps::ui())); + special + }; + let router = router::Router::new( + content_fetcher, + None, + special, + None, + dapps_domain, + ); + + Middleware { + router: router, + endpoints: Default::default(), + } + } + + /// Creates new Dapps server middleware. 
+ pub fn dapps( + remote: Remote, + ui_address: Option<(String, u16)>, dapps_path: PathBuf, extra_dapps: Vec, + dapps_domain: String, registrar: Arc, sync_status: Arc, web_proxy_tokens: Arc, @@ -117,45 +174,36 @@ impl Middleware { let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status, - signer_address.clone(), remote.clone(), fetch.clone(), - )); + ).embeddable_on(ui_address.clone()).allow_dapps(true)); let endpoints = apps::all_endpoints( dapps_path, extra_dapps, - signer_address.clone(), + dapps_domain.clone(), + ui_address.clone(), web_proxy_tokens, remote.clone(), fetch.clone(), ); - let cors_domains = cors_domains(signer_address.clone()); - let special = { - let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, None); - special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); - special.insert( - router::SpecialEndpoint::Api, - Some(api::RestApi::new( - cors_domains.clone(), - &endpoints, - content_fetcher.clone() - )), - ); + let mut special = special_endpoints(content_fetcher.clone()); + special.insert(router::SpecialEndpoint::Home, Some(apps::ui_redirection(ui_address.clone()))); special }; let router = router::Router::new( - signer_address, content_fetcher, - endpoints, + Some(endpoints.clone()), special, + ui_address, + dapps_domain, ); Middleware { router: router, + endpoints: endpoints, } } } @@ -166,21 +214,12 @@ impl http::RequestMiddleware for Middleware { } } -/// Returns a list of CORS domains for API endpoint. -fn cors_domains(signer_address: Option<(String, u16)>) -> Vec { - use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; - - match signer_address { - Some(signer_address) => [ - format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("http://{}", address(&signer_address)), - format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("https://{}", address(&signer_address)), - ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), - None => vec![], - } +fn special_endpoints(content_fetcher: Arc) -> HashMap>> { + let mut special = HashMap::new(); + special.insert(router::SpecialEndpoint::Rpc, None); + special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); + special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new(content_fetcher))); + special } fn address(address: &(String, u16)) -> String { @@ -193,29 +232,3 @@ fn random_filename() -> String { let mut rng = ::rand::OsRng::new().unwrap(); rng.gen_ascii_chars().take(12).collect() } - -#[cfg(test)] -mod util_tests { - use super::cors_domains; - use jsonrpc_http_server::AccessControlAllowOrigin; - - #[test] - fn should_return_cors_domains() { - // given - - // when - let none = cors_domains(None); - let some = cors_domains(Some(("127.0.0.1".into(), 18180))); - - // then - assert_eq!(none, Vec::::new()); - assert_eq!(some, vec![ - "http://parity.web3.site".into(), - "http://parity.web3.site:18180".into(), - "http://127.0.0.1:18180".into(), - "https://parity.web3.site".into(), - "https://parity.web3.site:18180".into(), - "https://127.0.0.1:18180".into(), - ]); - } -} diff --git a/dapps/src/page/builtin.rs b/dapps/src/page/builtin.rs index c778b2977..e93c29538 100644 --- a/dapps/src/page/builtin.rs +++ b/dapps/src/page/builtin.rs @@ -27,6 +27,7 @@ pub struct PageEndpoint { /// Safe to be loaded in frame by other origin. 
(use wisely!) safe_to_embed_on: Option<(String, u16)>, info: EndpointInfo, + fallback_to_index_html: bool, } impl PageEndpoint { @@ -38,6 +39,20 @@ impl PageEndpoint { prefix: None, safe_to_embed_on: None, info: EndpointInfo::from(info), + fallback_to_index_html: false, + } + } + + /// Creates a new `PageEndpoint` for builtin (compile time) Dapp. + /// Instead of returning 404 this endpoint will always server index.html. + pub fn with_fallback_to_index(app: T) -> Self { + let info = app.info(); + PageEndpoint { + app: Arc::new(app), + prefix: None, + safe_to_embed_on: None, + info: EndpointInfo::from(info), + fallback_to_index_html: true, } } @@ -51,6 +66,7 @@ impl PageEndpoint { prefix: Some(prefix), safe_to_embed_on: None, info: EndpointInfo::from(info), + fallback_to_index_html: false, } } @@ -64,6 +80,7 @@ impl PageEndpoint { prefix: None, safe_to_embed_on: address, info: EndpointInfo::from(info), + fallback_to_index_html: false, } } } @@ -76,7 +93,7 @@ impl Endpoint for PageEndpoint { fn to_handler(&self, path: EndpointPath) -> Box { Box::new(handler::PageHandler { - app: BuiltinDapp::new(self.app.clone()), + app: BuiltinDapp::new(self.app.clone(), self.fallback_to_index_html), prefix: self.prefix.clone(), path: path, file: handler::ServedFile::new(self.safe_to_embed_on.clone()), @@ -100,12 +117,14 @@ impl From for EndpointInfo { struct BuiltinDapp { app: Arc, + fallback_to_index_html: bool, } impl BuiltinDapp { - fn new(app: Arc) -> Self { + fn new(app: Arc, fallback_to_index_html: bool) -> Self { BuiltinDapp { app: app, + fallback_to_index_html: fallback_to_index_html, } } } @@ -114,13 +133,19 @@ impl handler::Dapp for BuiltinDapp { type DappFile = BuiltinDappFile; fn file(&self, path: &str) -> Option { - self.app.file(path).map(|_| { + let file = |path| self.app.file(path).map(|_| { BuiltinDappFile { app: self.app.clone(), path: path.into(), write_pos: 0, } - }) + }); + let res = file(path); + if self.fallback_to_index_html { + res.or_else(|| file("index.html")) + } else { + res + } } } diff --git a/dapps/src/proxypac.rs b/dapps/src/proxypac.rs index 16459d88e..13ea4c665 100644 --- a/dapps/src/proxypac.rs +++ b/dapps/src/proxypac.rs @@ -18,17 +18,19 @@ use endpoint::{Endpoint, Handler, EndpointPath}; use handlers::ContentHandler; -use apps::{HOME_PAGE, DAPPS_DOMAIN}; +use apps::HOME_PAGE; use address; pub struct ProxyPac { signer_address: Option<(String, u16)>, + dapps_domain: String, } impl ProxyPac { - pub fn boxed(signer_address: Option<(String, u16)>) -> Box { + pub fn boxed(signer_address: Option<(String, u16)>, dapps_domain: String) -> Box { Box::new(ProxyPac { - signer_address: signer_address + signer_address: signer_address, + dapps_domain: dapps_domain, }) } } @@ -43,12 +45,12 @@ impl Endpoint for ProxyPac { let content = format!( r#" function FindProxyForURL(url, host) {{ - if (shExpMatch(host, "{0}{1}")) + if (shExpMatch(host, "{0}.{1}")) {{ return "PROXY {4}"; }} - if (shExpMatch(host, "*{1}")) + if (shExpMatch(host, "*.{1}")) {{ return "PROXY {2}:{3}"; }} @@ -56,7 +58,7 @@ function FindProxyForURL(url, host) {{ return "DIRECT"; }} "#, - HOME_PAGE, DAPPS_DOMAIN, path.host, path.port, signer); + HOME_PAGE, self.dapps_domain, path.host, path.port, signer); Box::new(ContentHandler::ok(content, mime!(Application/Javascript))) } diff --git a/dapps/src/router.rs b/dapps/src/router.rs index c7b7fb7ff..b3454c782 100644 --- a/dapps/src/router.rs +++ b/dapps/src/router.rs @@ -17,20 +17,19 @@ //! Router implementation //! Dispatch requests to proper application. 
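A brief sketch of how the `PageEndpoint` constructors touched by this patch might be chosen by a caller, assuming the `parity_ui::App` builtin used elsewhere in the series; only `with_fallback_to_index` serves `index.html` for unknown paths instead of returning 404:

    // Builtin UI served as the home dapp; unknown paths fall back to index.html
    // so client-side routing keeps working.
    let ui = PageEndpoint::with_fallback_to_index(parity_ui::App::default());

    // Shared utilities mounted under the `parity-utils` path prefix.
    let utils = PageEndpoint::with_prefix(parity_ui::App::default(), UTILS_PATH.to_owned());

    // A builtin dapp that is safe to embed from the UI origin (address is illustrative).
    let embedded = PageEndpoint::new_safe_to_embed(parity_ui::App::default(), Some(("127.0.0.1".into(), 18180)));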
-use address; use std::cmp; use std::sync::Arc; use std::collections::HashMap; use url::{Url, Host}; -use hyper::{self, server, header, Control, StatusCode}; +use hyper::{self, server, header, Control}; use hyper::net::HttpStream; use jsonrpc_http_server as http; -use apps::{self, DAPPS_DOMAIN}; +use apps; use apps::fetcher::Fetcher; use endpoint::{Endpoint, Endpoints, EndpointPath, Handler}; -use handlers::{self, Redirection, ContentHandler}; +use handlers; /// Special endpoints are accessible on every domain (every dapp) #[derive(Debug, PartialEq, Hash, Eq)] @@ -38,26 +37,28 @@ pub enum SpecialEndpoint { Rpc, Api, Utils, + Home, None, } pub struct Router { - signer_address: Option<(String, u16)>, - endpoints: Endpoints, + endpoints: Option, fetch: Arc, special: HashMap>>, + embeddable_on: Option<(String, u16)>, + dapps_domain: String, } impl http::RequestMiddleware for Router { fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { // Choose proper handler depending on path / domain let url = handlers::extract_url(req); - let endpoint = extract_endpoint(&url); - let referer = extract_referer_endpoint(req); + let endpoint = extract_endpoint(&url, &self.dapps_domain); + let referer = extract_referer_endpoint(req, &self.dapps_domain); let is_utils = endpoint.1 == SpecialEndpoint::Utils; - let is_dapps_domain = endpoint.0.as_ref().map(|endpoint| endpoint.using_dapps_domains).unwrap_or(false); - let is_origin_set = req.headers().get::().is_some(); + let is_origin_set = req.headers().get::().is_some(); let is_get_request = *req.method() == hyper::Method::Get; + let is_head_request = *req.method() == hyper::Method::Head; trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); @@ -67,7 +68,7 @@ impl http::RequestMiddleware for Router { // Handle invalid web requests that we can recover from (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) if referer.app_id == apps::WEB_PATH - && self.endpoints.contains_key(apps::WEB_PATH) + && self.endpoints.as_ref().map(|ep| ep.contains_key(apps::WEB_PATH)).unwrap_or(false) && !is_web_endpoint(path) => { @@ -75,7 +76,7 @@ impl http::RequestMiddleware for Router { let len = cmp::min(referer_url.path.len(), 2); // /web// let base = referer_url.path[..len].join("/"); let requested = url.map(|u| u.path.join("/")).unwrap_or_default(); - Some(Redirection::boxed(&format!("/{}/{}", base, requested))) + Some(handlers::Redirection::boxed(&format!("/{}/{}", base, requested))) }, // First check special endpoints (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { @@ -86,9 +87,12 @@ impl http::RequestMiddleware for Router { .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) }, // Then delegate to dapp - (Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => { + (Some(ref path), _, _) if self.endpoints.as_ref().map(|ep| ep.contains_key(&path.app_id)).unwrap_or(false) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); - Some(self.endpoints.get(&path.app_id) + Some(self.endpoints + .as_ref() + .expect("endpoints known to be set; qed") + .get(&path.app_id) .expect("endpoints known to contain key; qed") .to_async_handler(path.clone(), control)) }, @@ -97,36 +101,28 @@ impl http::RequestMiddleware for Router { trace!(target: "dapps", "Resolving to fetchable content."); Some(self.fetch.to_async_handler(path.clone(), control)) }, - // NOTE [todr] /home is redirected to home page since some users may have the 
redirection cached - // (in the past we used 301 instead of 302) - // It should be safe to remove it in (near) future. - // - // 404 for non-existent content - (Some(ref path), _, _) if is_get_request && path.app_id != "home" => { + // 404 for non-existent content (only if serving endpoints and not homepage) + (Some(ref path), _, _) + if (is_get_request || is_head_request) + && self.endpoints.is_some() + && path.app_id != apps::HOME_PAGE + => + { trace!(target: "dapps", "Resolving to 404."); - Some(Box::new(ContentHandler::error( - StatusCode::NotFound, + Some(Box::new(handlers::ContentHandler::error( + hyper::StatusCode::NotFound, "404 Not Found", "Requested content was not found.", None, - self.signer_address.clone(), + self.embeddable_on.clone(), ))) }, - // Redirect any other GET request to signer. - _ if is_get_request => { - if let Some(ref signer_address) = self.signer_address { - trace!(target: "dapps", "Redirecting to signer interface."); - Some(Redirection::boxed(&format!("http://{}", address(signer_address)))) - } else { - trace!(target: "dapps", "Signer disabled, returning 404."); - Some(Box::new(ContentHandler::error( - StatusCode::NotFound, - "404 Not Found", - "Your homepage is not available when Trusted Signer is disabled.", - Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), - self.signer_address.clone(), - ))) - } + // Any other GET|HEAD requests to home page. + _ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => { + self.special.get(&SpecialEndpoint::Home) + .expect("special known to contain key; qed") + .as_ref() + .map(|special| special.to_async_handler(Default::default(), control)) }, // RPC by default _ => { @@ -137,7 +133,7 @@ impl http::RequestMiddleware for Router { match handler { Some(handler) => http::RequestMiddlewareAction::Respond { - should_validate_hosts: !(is_utils || is_dapps_domain), + should_validate_hosts: !is_utils, handler: handler, }, None => http::RequestMiddlewareAction::Proceed { @@ -149,16 +145,18 @@ impl http::RequestMiddleware for Router { impl Router { pub fn new( - signer_address: Option<(String, u16)>, content_fetcher: Arc, - endpoints: Endpoints, + endpoints: Option, special: HashMap>>, + embeddable_on: Option<(String, u16)>, + dapps_domain: String, ) -> Self { Router { - signer_address: signer_address, endpoints: endpoints, fetch: content_fetcher, special: special, + embeddable_on: embeddable_on, + dapps_domain: format!(".{}", dapps_domain), } } } @@ -170,19 +168,19 @@ fn is_web_endpoint(path: &Option) -> bool { } } -fn extract_referer_endpoint(req: &server::Request) -> Option<(EndpointPath, Url)> { +fn extract_referer_endpoint(req: &server::Request, dapps_domain: &str) -> Option<(EndpointPath, Url)> { let referer = req.headers().get::(); let url = referer.and_then(|referer| Url::parse(&referer.0).ok()); url.and_then(|url| { let option = Some(url); - extract_url_referer_endpoint(&option).or_else(|| { - extract_endpoint(&option).0.map(|endpoint| (endpoint, option.expect("Just wrapped; qed"))) + extract_url_referer_endpoint(&option, dapps_domain).or_else(|| { + extract_endpoint(&option, dapps_domain).0.map(|endpoint| (endpoint, option.expect("Just wrapped; qed"))) }) }) } -fn extract_url_referer_endpoint(url: &Option) -> Option<(EndpointPath, Url)> { +fn extract_url_referer_endpoint(url: &Option, dapps_domain: &str) -> Option<(EndpointPath, Url)> { let query = url.as_ref().and_then(|url| url.query.as_ref()); match (url, query) { 
(&Some(ref url), Some(ref query)) if query.starts_with(apps::URL_REFERER) => { @@ -190,7 +188,7 @@ fn extract_url_referer_endpoint(url: &Option) -> Option<(EndpointPath, Url) debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url); let referer_url = Url::parse(&referer_url).ok(); - extract_endpoint(&referer_url).0.map(|endpoint| { + extract_endpoint(&referer_url, dapps_domain).0.map(|endpoint| { (endpoint, referer_url.expect("Endpoint returned only when url `is_some`").clone()) }) }, @@ -198,7 +196,7 @@ fn extract_url_referer_endpoint(url: &Option) -> Option<(EndpointPath, Url) } } -fn extract_endpoint(url: &Option) -> (Option, SpecialEndpoint) { +fn extract_endpoint(url: &Option, dapps_domain: &str) -> (Option, SpecialEndpoint) { fn special_endpoint(url: &Url) -> SpecialEndpoint { if url.path.len() <= 1 { return SpecialEndpoint::None; @@ -208,14 +206,15 @@ fn extract_endpoint(url: &Option) -> (Option, SpecialEndpoint apps::RPC_PATH => SpecialEndpoint::Rpc, apps::API_PATH => SpecialEndpoint::Api, apps::UTILS_PATH => SpecialEndpoint::Utils, + apps::HOME_PAGE => SpecialEndpoint::Home, _ => SpecialEndpoint::None, } } match *url { Some(ref url) => match url.host { - Host::Domain(ref domain) if domain.ends_with(DAPPS_DOMAIN) => { - let id = &domain[0..(domain.len() - DAPPS_DOMAIN.len())]; + Host::Domain(ref domain) if domain.ends_with(dapps_domain) => { + let id = &domain[0..(domain.len() - dapps_domain.len())]; let (id, params) = if let Some(split) = id.rfind('.') { let (params, id) = id.split_at(split); (id[1..].to_owned(), [params.to_owned()].into_iter().chain(&url.path).cloned().collect()) @@ -249,11 +248,12 @@ fn extract_endpoint(url: &Option) -> (Option, SpecialEndpoint #[test] fn should_extract_endpoint() { - assert_eq!(extract_endpoint(&None), (None, SpecialEndpoint::None)); + let dapps_domain = ".web3.site"; + assert_eq!(extract_endpoint(&None, dapps_domain), (None, SpecialEndpoint::None)); // With path prefix assert_eq!( - extract_endpoint(&Url::parse("http://localhost:8080/status/index.html").ok()), + extract_endpoint(&Url::parse("http://localhost:8080/status/index.html").ok(), dapps_domain), (Some(EndpointPath { app_id: "status".to_owned(), app_params: vec!["index.html".to_owned()], @@ -265,7 +265,7 @@ fn should_extract_endpoint() { // With path prefix assert_eq!( - extract_endpoint(&Url::parse("http://localhost:8080/rpc/").ok()), + extract_endpoint(&Url::parse("http://localhost:8080/rpc/").ok(), dapps_domain), (Some(EndpointPath { app_id: "rpc".to_owned(), app_params: vec!["".to_owned()], @@ -276,7 +276,7 @@ fn should_extract_endpoint() { ); assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/parity-utils/inject.js").ok()), + extract_endpoint(&Url::parse("http://my.status.web3.site/parity-utils/inject.js").ok(), dapps_domain), (Some(EndpointPath { app_id: "status".to_owned(), app_params: vec!["my".to_owned(), "parity-utils".into(), "inject.js".into()], @@ -288,7 +288,7 @@ fn should_extract_endpoint() { // By Subdomain assert_eq!( - extract_endpoint(&Url::parse("http://status.web3.site/test.html").ok()), + extract_endpoint(&Url::parse("http://status.web3.site/test.html").ok(), dapps_domain), (Some(EndpointPath { app_id: "status".to_owned(), app_params: vec!["test.html".to_owned()], @@ -300,7 +300,7 @@ fn should_extract_endpoint() { // RPC by subdomain assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/rpc/").ok()), + extract_endpoint(&Url::parse("http://my.status.web3.site/rpc/").ok(), dapps_domain), 
(Some(EndpointPath { app_id: "status".to_owned(), app_params: vec!["my".to_owned(), "rpc".into(), "".into()], @@ -312,7 +312,7 @@ fn should_extract_endpoint() { // API by subdomain assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/api/").ok()), + extract_endpoint(&Url::parse("http://my.status.web3.site/api/").ok(), dapps_domain), (Some(EndpointPath { app_id: "status".to_owned(), app_params: vec!["my".to_owned(), "api".into(), "".into()], diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs deleted file mode 100644 index 74c6d8d89..000000000 --- a/dapps/src/rpc.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use std::sync::Arc; -use hyper; - -use parity_rpc::{Metadata, Origin}; -use jsonrpc_core::{Middleware, MetaIoHandler}; -use jsonrpc_http_server::{self as http, AccessControlAllowOrigin, HttpMetaExtractor}; -use jsonrpc_http_server::tokio_core::reactor::Remote; -use endpoint::{Endpoint, EndpointPath, Handler}; - -pub fn rpc>( - handler: MetaIoHandler, - remote: Remote, - cors_domains: Vec, -) -> Box { - Box::new(RpcEndpoint { - handler: Arc::new(handler), - remote: remote, - meta_extractor: Arc::new(MetadataExtractor), - cors_domain: Some(cors_domains), - // NOTE [ToDr] We don't need to do any hosts validation here. It's already done in router. 
- allowed_hosts: None, - }) -} - -struct RpcEndpoint> { - handler: Arc>, - remote: Remote, - meta_extractor: Arc>, - cors_domain: Option>, - allowed_hosts: Option>, -} - - -impl> Endpoint for RpcEndpoint { - fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { - Box::new(http::ServerHandler::new( - http::Rpc { - handler: self.handler.clone(), - remote: self.remote.clone(), - extractor: self.meta_extractor.clone(), - }, - self.cors_domain.clone(), - self.allowed_hosts.clone(), - Arc::new(NoopMiddleware), - control, - )) - } -} - -#[derive(Default)] -struct NoopMiddleware; -impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &http::hyper::server::Request, _control: &http::hyper::Control) -> http::RequestMiddlewareAction { - http::RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: request.headers().get::().is_none(), - } - } -} - -pub struct MetadataExtractor; -impl HttpMetaExtractor for MetadataExtractor { - fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { - let dapp_id = request.headers().get::() - .map(|origin| format!("{}://{}", origin.scheme, origin.host)) - .or_else(|| { - // fallback to custom header, but only if origin is null - request.headers().get_raw("origin") - .and_then(|raw| raw.one()) - .and_then(|raw| if raw == "null".as_bytes() { - request.headers().get_raw("x-parity-origin") - .and_then(|raw| raw.one()) - .map(|raw| String::from_utf8_lossy(raw).into_owned()) - } else { - None - }) - }); - Metadata { - origin: Origin::Dapps(dapp_id.map(Into::into).unwrap_or_default()), - } - } -} diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 043814377..b75cd25f2 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -39,29 +39,6 @@ fn should_return_error() { assert_security_headers(&response.headers); } -#[test] -fn should_serve_apps() { - // given - let server = serve(); - - // when - let response = request(server, - "\ - GET /api/apps HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Content-Type", "application/json"); - assert!(response.body.contains("Parity UI"), response.body); - assert_security_headers(&response.headers); -} - #[test] fn should_handle_ping() { // given @@ -106,92 +83,3 @@ fn should_try_to_resolve_dapp() { assert_eq!(registrar.calls.lock().len(), 2); assert_security_headers(&response.headers); } - -#[test] -fn should_return_signer_port_cors_headers() { - // given - let server = serve(); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://127.0.0.1:18180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://127.0.0.1:18180"); -} - -#[test] -fn should_return_signer_port_cors_headers_for_home_parity() { - // given - let server = serve(); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://parity.web3.site\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site"); -} - - -#[test] -fn should_return_signer_port_cors_headers_for_home_parity_with_https() { - // given - let server = serve(); - - // when - let response = 
request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: https://parity.web3.site\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "https://parity.web3.site"); -} - -#[test] -fn should_return_signer_port_cors_headers_for_home_parity_with_port() { - // given - let server = serve(); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://parity.web3.site:18180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); -} diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index e6c032549..6bc0006ce 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -26,7 +26,7 @@ use jsonrpc_http_server::{self as http, Host, DomainsValidation}; use devtools::http_client; use hash_fetch::urlhint::ContractClient; use fetch::{Fetch, Client as FetchClient}; -use parity_reactor::{EventLoop, Remote}; +use parity_reactor::Remote; use {Middleware, SyncStatus, WebProxyTokens}; @@ -47,20 +47,7 @@ fn init_logger() { } } -pub struct ServerLoop { - pub server: Server, - pub event_loop: EventLoop, -} - -impl ::std::ops::Deref for ServerLoop { - type Target = Server; - - fn deref(&self) -> &Self::Target { - &self.server - } -} - -pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (ServerLoop, Arc) where +pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (Server, Arc) where F: FnOnce(ServerBuilder) -> ServerBuilder, B: Fetch, { @@ -69,44 +56,41 @@ pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (ServerLo let mut dapps_path = env::temp_dir(); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - // TODO [ToDr] When https://github.com/paritytech/jsonrpc/issues/26 is resolved - // this additional EventLoop wouldn't be needed, we should be able to re-use remote. 
- let event_loop = EventLoop::spawn(); let server = process(ServerBuilder::new( &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); ( - ServerLoop { server: server, event_loop: event_loop }, + server, registrar, ) } -pub fn serve_with_rpc(io: IoHandler) -> ServerLoop { +pub fn serve_with_rpc(io: IoHandler) -> Server { init_server(|builder| builder, io, Remote::new_sync()).0 } -pub fn serve_hosts(hosts: Option>) -> ServerLoop { +pub fn serve_hosts(hosts: Option>) -> Server { let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect()); init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } -pub fn serve_with_registrar() -> (ServerLoop, Arc) { +pub fn serve_with_registrar() -> (Server, Arc) { init_server(|builder| builder, Default::default(), Remote::new_sync()) } -pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { +pub fn serve_with_registrar_and_sync() -> (Server, Arc) { init_server(|builder| { builder.sync_status(Arc::new(|| true)) }, Default::default(), Remote::new_sync()) } -pub fn serve_with_registrar_and_fetch() -> (ServerLoop, FakeFetch, Arc) { +pub fn serve_with_registrar_and_fetch() -> (Server, FakeFetch, Arc) { serve_with_registrar_and_fetch_and_threads(false) } -pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (ServerLoop, FakeFetch, Arc) { +pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Server, FakeFetch, Arc) { let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { @@ -116,7 +100,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv (server, fetch, reg) } -pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { +pub fn serve_with_fetch(web_token: &'static str) -> (Server, FakeFetch) { let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, _) = init_server(move |builder| { @@ -128,11 +112,11 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { (server, fetch) } -pub fn serve() -> ServerLoop { +pub fn serve() -> Server { init_server(|builder| builder, Default::default(), Remote::new_sync()).0 } -pub fn request(server: ServerLoop, request: &str) -> http_client::Response { +pub fn request(server: Server, request: &str) -> http_client::Response { http_client::request(server.addr(), request) } @@ -240,6 +224,7 @@ impl ServerBuilder { } } +const DAPPS_DOMAIN: &'static str = "web3.site"; /// Webapps HTTP server. 
pub struct Server { @@ -260,19 +245,27 @@ impl Server { remote: Remote, fetch: F, ) -> Result { - let middleware = Middleware::new( + let middleware = Middleware::dapps( remote, signer_address, dapps_path, extra_dapps, + DAPPS_DOMAIN.into(), registrar, sync_status, web_proxy_tokens, fetch, ); + + let mut allowed_hosts: Option> = allowed_hosts.into(); + allowed_hosts.as_mut().map(|mut hosts| { + hosts.push(format!("http://*.{}:*", DAPPS_DOMAIN).into()); + hosts.push(format!("http://*.{}", DAPPS_DOMAIN).into()); + }); + http::ServerBuilder::new(io) .request_middleware(middleware) - .allowed_hosts(allowed_hosts) + .allowed_hosts(allowed_hosts.into()) .cors(http::DomainsValidation::Disabled) .start_http(addr) .map(|server| Server { diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 4e3fff4dc..1e9b039e2 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -37,15 +37,15 @@ fn should_redirect_to_home() { } #[test] -fn should_redirect_to_home_when_trailing_slash_is_missing() { +fn should_redirect_to_home_with_domain() { // given let server = serve(); // when let response = request(server, "\ - GET /app HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ + GET / HTTP/1.1\r\n\ + Host: home.web3.site\r\n\ Connection: close\r\n\ \r\n\ " @@ -57,14 +57,14 @@ fn should_redirect_to_home_when_trailing_slash_is_missing() { } #[test] -fn should_redirect_to_home_for_users_with_cached_redirection() { +fn should_redirect_to_home_when_trailing_slash_is_missing() { // given let server = serve(); // when let response = request(server, "\ - GET /home/ HTTP/1.1\r\n\ + GET /app HTTP/1.1\r\n\ Host: 127.0.0.1:8080\r\n\ Connection: close\r\n\ \r\n\ @@ -179,7 +179,7 @@ fn should_serve_proxy_pac() { // then response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "DD\n\nfunction FindProxyForURL(url, host) {\n\tif (shExpMatch(host, \"parity.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:18180\";\n\t}\n\n\tif (shExpMatch(host, \"*.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:8080\";\n\t}\n\n\treturn \"DIRECT\";\n}\n\n0\n\n".to_owned()); + assert_eq!(response.body, "DB\n\nfunction FindProxyForURL(url, host) {\n\tif (shExpMatch(host, \"home.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:18180\";\n\t}\n\n\tif (shExpMatch(host, \"*.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:8080\";\n\t}\n\n\treturn \"DIRECT\";\n}\n\n0\n\n".to_owned()); assert_security_headers(&response.headers); } diff --git a/js/src/api/rpc/parity/parity.js b/js/src/api/rpc/parity/parity.js index 22b3f9751..4fdaf5b1b 100644 --- a/js/src/api/rpc/parity/parity.js +++ b/js/src/api/rpc/parity/parity.js @@ -90,15 +90,14 @@ export default class Parity { .execute('parity_consensusCapability'); } - dappsPort () { + dappsList () { return this._transport - .execute('parity_dappsPort') - .then(outNumber); + .execute('parity_dappsList'); } - dappsInterface () { + dappsUrl () { return this._transport - .execute('parity_dappsInterface'); + .execute('parity_dappsUrl'); } decryptMessage (address, data) { @@ -530,12 +529,6 @@ export default class Parity { .execute('parity_setVaultMeta', vaultName, JSON.stringify(meta)); } - signerPort () { - return this._transport - .execute('parity_signerPort') - .then(outNumber); - } - signMessage (address, password, messageHash) { return this._transport .execute('parity_signMessage', inAddress(address), password, inHex(messageHash)); @@ -567,4 +560,9 @@ export default class Parity { return this._transport .execute('parity_versionInfo'); } + + wsUrl () { + 
return this._transport + .execute('parity_wsUrl'); + } } diff --git a/js/src/dapps/console/parity.js b/js/src/dapps/console/parity.js index 9fce483e1..d05cc2350 100644 --- a/js/src/dapps/console/parity.js +++ b/js/src/dapps/console/parity.js @@ -19,312 +19,308 @@ import Web3 from 'web3'; const api = window.parent.secureApi; let web3; -Promise - .all([ - api.parity.dappsInterface(), - api.parity.dappsPort() - ]).then((res) => { - web3 = new Web3(new Web3.providers.HttpProvider(`http://${res.join(':')}/rpc/`)); - window.web3 = web3; +api.parity.dappsUrl().then(url => { + web3 = new Web3(new Web3.providers.HttpProvider(`${window.location.protocol}//${url}/rpc/`)); + window.web3 = web3; - // Usage example: - // web3.eth.traceCall({ - // to: theChicken.address, - // data: theChicken.withdraw.getData(100000000000000000), - // gas: 100000 - // }, - // `["trace", "vmTrace", "stateDiff"] - // ) - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'traceCall', - call: 'trace_call', - params: 2, - inputFormatter: [web3._extend.formatters.inputCallFormatter, null] - }) - ] - }); + // Usage example: + // web3.eth.traceCall({ + // to: theChicken.address, + // data: theChicken.withdraw.getData(100000000000000000), + // gas: 100000 + // }, + // `["trace", "vmTrace", "stateDiff"] + // ) + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'traceCall', + call: 'trace_call', + params: 2, + inputFormatter: [web3._extend.formatters.inputCallFormatter, null] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'traceSendRawTransaction', - call: 'trace_rawTransaction', - params: 2, - inputFormatter: [null, null] - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'traceSendRawTransaction', + call: 'trace_rawTransaction', + params: 2, + inputFormatter: [null, null] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'traceReplayTransaction', - call: 'trace_replayTransaction', - params: 2, - inputFormatter: [null, null] - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'traceReplayTransaction', + call: 'trace_replayTransaction', + params: 2, + inputFormatter: [null, null] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'setMode', - call: 'parity_setMode', - params: 1 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'setMode', + call: 'parity_setMode', + params: 1 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'mode', - call: 'parity_mode', - params: 0 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'mode', + call: 'parity_mode', + params: 0 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'traceTransaction', - call: 'trace_Transaction', - params: 1, - inputFormatter: [null] - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'traceTransaction', + call: 'trace_Transaction', + params: 1, + inputFormatter: [null] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'gasPriceStatistics', - call: 'parity_gasPriceStatistics', - params: 0, - outputFormatter: function (a) { return a.map(web3.toBigNumber); } - }) - ] - }); 
+ web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'gasPriceStatistics', + call: 'parity_gasPriceStatistics', + params: 0, + outputFormatter: function (a) { return a.map(web3.toBigNumber); } + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'registryAddress', - call: 'parity_registryAddress', - params: 0 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'registryAddress', + call: 'parity_registryAddress', + params: 0 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'accountsInfo', - call: 'personal_accountsInfo', - outputFormatter: function (m) { - Object.keys(m).forEach(k => { - m[k].meta = JSON.parse(m[k].meta); - m[k].meta.name = m[k].name; - m[k].meta.uuid = m[k].uuid; - m[k] = m[k].meta; - }); return m; - }, - params: 0 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'accountsInfo', + call: 'personal_accountsInfo', + outputFormatter: function (m) { + Object.keys(m).forEach(k => { + m[k].meta = JSON.parse(m[k].meta); + m[k].meta.name = m[k].name; + m[k].meta.uuid = m[k].uuid; + m[k] = m[k].meta; + }); return m; + }, + params: 0 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'setAccountName', - call: 'personal_setAccountName', - params: 2 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'setAccountName', + call: 'personal_setAccountName', + params: 2 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'setAccountMeta', - call: 'personal_setAccountMeta', - params: 2, - inputFormatter: [a => a, JSON.stringify] - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'setAccountMeta', + call: 'personal_setAccountMeta', + params: 2, + inputFormatter: [a => a, JSON.stringify] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'postTransaction', - call: 'eth_postTransaction', - params: 1, - inputFormatter: [web3._extend.formatters.inputCallFormatter] - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'postTransaction', + call: 'eth_postTransaction', + params: 1, + inputFormatter: [web3._extend.formatters.inputCallFormatter] + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'postSign', - call: 'eth_postSign', - params: 1 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'postSign', + call: 'eth_postSign', + params: 1 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'encryptMessage', - call: 'parity_encryptMessage', - params: 2 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'encryptMessage', + call: 'parity_encryptMessage', + params: 2 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'checkRequest', - call: 'eth_checkRequest', - params: 1 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'checkRequest', + call: 'eth_checkRequest', + params: 1 + }) + ] + }); - web3._extend({ - property: 'eth', - methods: [ - new web3._extend.Method({ - name: 'listAccounts', - call: 
'parity_listAccounts', - params: 0 - }) - ] - }); + web3._extend({ + property: 'eth', + methods: [ + new web3._extend.Method({ + name: 'listAccounts', + call: 'parity_listAccounts', + params: 0 + }) + ] + }); - { - let postTransaction = web3.eth.postTransaction.bind(web3.eth); - let sendTransaction = web3.eth.sendTransaction.bind(web3.eth); + { + let postTransaction = web3.eth.postTransaction.bind(web3.eth); + let sendTransaction = web3.eth.sendTransaction.bind(web3.eth); - web3.eth.sendTransaction = function (options, f) { - // No callback - do sync API. - if (typeof f !== 'function') { - return sendTransaction(options); - } - // Callback - use async API. - let id = postTransaction(options); + web3.eth.sendTransaction = function (options, f) { + // No callback - do sync API. + if (typeof f !== 'function') { + return sendTransaction(options); + } + // Callback - use async API. + let id = postTransaction(options); - console.log('Posted trasaction id=' + id); - let timerId = window.setInterval(check, 500); + console.log('Posted trasaction id=' + id); + let timerId = window.setInterval(check, 500); - function check () { - try { - let r = web3.eth.checkRequest(id); + function check () { + try { + let r = web3.eth.checkRequest(id); - if (typeof r === 'string') { - clearInterval(timerId); - if (r === '0x0000000000000000000000000000000000000000000000000000000000000000') { - f('Rejected', r); - } else { - f(null, r); - } - } else if (r !== null) { - console.log('checkRequest returned: ' + r); - } - } catch (e) { + if (typeof r === 'string') { clearInterval(timerId); - f('Rejected', null); - } - } - }; - } - - web3.eth.installInterceptor = function (interceptor) { - let oldSendTransaction = web3.eth.sendTransaction.bind(web3.eth); - - web3.eth.sendTransaction = function (options, f) { - if (!interceptor(options)) { - return '0x0000000000000000000000000000000000000000000000000000000000000000'; - } - - return oldSendTransaction(options, f); - }; - }; - - web3.eth.reporter = function (e, r) { - if (e) { - console.log('Error confirming transaction: ' + e); - } else { - let addr = r; - let confirmed = false; - let timerId = window.setInterval(function check () { - let receipt = web3.eth.getTransactionReceipt(addr); - - if (receipt != null) { - if (!confirmed) { - console.log('Transaction confirmed (' + r + '); used ' + receipt.gasUsed + ' gas; left ' + receipt.logs.length + ' logs; mining...'); - confirmed = true; - } - if (typeof receipt.blockHash === 'string') { - clearInterval(timerId); - console.log('Mined into block ' + receipt.blockNumber); + if (r === '0x0000000000000000000000000000000000000000000000000000000000000000') { + f('Rejected', r); + } else { + f(null, r); } + } else if (r !== null) { + console.log('checkRequest returned: ' + r); } - }, 500); + } catch (e) { + clearInterval(timerId); + f('Rejected', null); + } } }; + } - { - let oldSha3 = web3.sha3; + web3.eth.installInterceptor = function (interceptor) { + let oldSendTransaction = web3.eth.sendTransaction.bind(web3.eth); - web3.sha3 = function (data, format) { - if (typeof format !== 'string' || (format !== 'hex' && format !== 'bin')) { - format = data.startsWith('0x') ? 
'hex' : 'bin'; + web3.eth.sendTransaction = function (options, f) { + if (!interceptor(options)) { + return '0x0000000000000000000000000000000000000000000000000000000000000000'; + } + + return oldSendTransaction(options, f); + }; + }; + + web3.eth.reporter = function (e, r) { + if (e) { + console.log('Error confirming transaction: ' + e); + } else { + let addr = r; + let confirmed = false; + let timerId = window.setInterval(function check () { + let receipt = web3.eth.getTransactionReceipt(addr); + + if (receipt != null) { + if (!confirmed) { + console.log('Transaction confirmed (' + r + '); used ' + receipt.gasUsed + ' gas; left ' + receipt.logs.length + ' logs; mining...'); + confirmed = true; + } + if (typeof receipt.blockHash === 'string') { + clearInterval(timerId); + console.log('Mined into block ' + receipt.blockNumber); + } } - return oldSha3(data, { encoding: format }); - }; + }, 500); } + }; - { - let Registry = web3.eth.contract([{ 'constant': false, 'inputs': [{ 'name': '_new', 'type': 'address' }], 'name': 'setOwner', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'string' }], 'name': 'confirmReverse', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'reserve', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'bytes32' }], 'name': 'set', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'drop', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'getAddress', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_amount', 'type': 'uint256' }], 'name': 'setFee', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_to', 'type': 'address' }], 'name': 'transfer', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'owner', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'reserved', 'outputs': [{ 'name': 'reserved', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'drain', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'string' }, { 'name': '_who', 'type': 'address' }], 'name': 'proposeReverse', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'getUint', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'get', 'outputs': [{ 'name': '', 'type': 'bytes32' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'fee', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '', 'type': 'address' }], 'name': 'reverse', 
'outputs': [{ 'name': '', 'type': 'string' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'uint256' }], 'name': 'setUint', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'removeReverse', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'address' }], 'name': 'setAddress', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'anonymous': false, 'inputs': [{ 'indexed': false, 'name': 'amount', 'type': 'uint256' }], 'name': 'Drained', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': false, 'name': 'amount', 'type': 'uint256' }], 'name': 'FeeChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }], 'name': 'Reserved', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'oldOwner', 'type': 'address' }, { 'indexed': true, 'name': 'newOwner', 'type': 'address' }], 'name': 'Transferred', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }], 'name': 'Dropped', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }, { 'indexed': true, 'name': 'key', 'type': 'string' }, { 'indexed': false, 'name': 'plainKey', 'type': 'string' }], 'name': 'DataChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseProposed', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseConfirmed', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseRemoved', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'old', 'type': 'address' }, { 'indexed': true, 'name': 'current', 'type': 'address' }], 'name': 'NewOwner', 'type': 'event' }]); + { + let oldSha3 = web3.sha3; - web3.eth.registry = Registry.at(web3.eth.registryAddress()); - web3.eth.registry.lookup = (name, field) => web3.eth.registry.get(web3.sha3(name), field); - web3.eth.registry.lookupAddress = (name, field) => web3.eth.registry.getAddress(web3.sha3(name), field); - web3.eth.registry.lookupUint = (name, field) => web3.eth.registry.getUint(web3.sha3(name), field); + web3.sha3 = function (data, format) { + if (typeof format !== 'string' || (format !== 'hex' && format !== 'bin')) { + format = data.startsWith('0x') ? 
'hex' : 'bin'; + } + return oldSha3(data, { encoding: format }); + }; + } - let TokenReg = web3.eth.contract([{ 'constant': true, 'inputs': [{ 'name': '_id', 'type': 'uint256' }], 'name': 'token', 'outputs': [{ 'name': 'addr', 'type': 'address' }, { 'name': 'tla', 'type': 'string' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_new', 'type': 'address' }], 'name': 'setOwner', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_addr', 'type': 'address' }, { 'name': '_tla', 'type': 'string' }, { 'name': '_base', 'type': 'uint256' }, { 'name': '_name', 'type': 'string' }], 'name': 'register', 'outputs': [{ 'name': '', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_fee', 'type': 'uint256' }], 'name': 'setFee', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_id', 'type': 'uint256' }, { 'name': '_key', 'type': 'bytes32' }], 'name': 'meta', 'outputs': [{ 'name': '', 'type': 'bytes32' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_addr', 'type': 'address' }, { 'name': '_tla', 'type': 'string' }, { 'name': '_base', 'type': 'uint256' }, { 'name': '_name', 'type': 'string' }, { 'name': '_owner', 'type': 'address' }], 'name': 'registerAs', 'outputs': [{ 'name': '', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_tla', 'type': 'string' }], 'name': 'fromTLA', 'outputs': [{ 'name': 'id', 'type': 'uint256' }, { 'name': 'addr', 'type': 'address' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'owner', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'drain', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'tokenCount', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_id', 'type': 'uint256' }], 'name': 'unregister', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_addr', 'type': 'address' }], 'name': 'fromAddress', 'outputs': [{ 'name': 'id', 'type': 'uint256' }, { 'name': 'tla', 'type': 'string' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_id', 'type': 'uint256' }, { 'name': '_key', 'type': 'bytes32' }, { 'name': '_value', 'type': 'bytes32' }], 'name': 'setMeta', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'fee', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'tla', 'type': 'string' }, { 'indexed': true, 'name': 'id', 'type': 'uint256' }, { 'indexed': false, 'name': 'addr', 'type': 'address' }, { 'indexed': false, 'name': 'name', 'type': 'string' }], 'name': 'Registered', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'tla', 'type': 'string' }, { 'indexed': true, 'name': 'id', 'type': 'uint256' }], 'name': 'Unregistered', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'id', 'type': 'uint256' }, { 'indexed': true, 'name': 'key', 'type': 'bytes32' }, { 'indexed': false, 'name': 
'value', 'type': 'bytes32' }], 'name': 'MetaChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'old', 'type': 'address' }, { 'indexed': true, 'name': 'current', 'type': 'address' }], 'name': 'NewOwner', 'type': 'event' }]); + { + let Registry = web3.eth.contract([{ 'constant': false, 'inputs': [{ 'name': '_new', 'type': 'address' }], 'name': 'setOwner', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'string' }], 'name': 'confirmReverse', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'reserve', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'bytes32' }], 'name': 'set', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'drop', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'getAddress', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_amount', 'type': 'uint256' }], 'name': 'setFee', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_to', 'type': 'address' }], 'name': 'transfer', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'owner', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }], 'name': 'reserved', 'outputs': [{ 'name': 'reserved', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'drain', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'string' }, { 'name': '_who', 'type': 'address' }], 'name': 'proposeReverse', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'getUint', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }], 'name': 'get', 'outputs': [{ 'name': '', 'type': 'bytes32' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'fee', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '', 'type': 'address' }], 'name': 'reverse', 'outputs': [{ 'name': '', 'type': 'string' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'uint256' }], 'name': 'setUint', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'removeReverse', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_name', 'type': 'bytes32' }, { 'name': '_key', 'type': 'string' }, { 'name': '_value', 'type': 'address' }], 'name': 'setAddress', 'outputs': [{ 'name': 'success', 'type': 'bool' }], 'type': 'function' 
}, { 'anonymous': false, 'inputs': [{ 'indexed': false, 'name': 'amount', 'type': 'uint256' }], 'name': 'Drained', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': false, 'name': 'amount', 'type': 'uint256' }], 'name': 'FeeChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }], 'name': 'Reserved', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'oldOwner', 'type': 'address' }, { 'indexed': true, 'name': 'newOwner', 'type': 'address' }], 'name': 'Transferred', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }], 'name': 'Dropped', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'bytes32' }, { 'indexed': true, 'name': 'owner', 'type': 'address' }, { 'indexed': true, 'name': 'key', 'type': 'string' }, { 'indexed': false, 'name': 'plainKey', 'type': 'string' }], 'name': 'DataChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseProposed', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseConfirmed', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'name', 'type': 'string' }, { 'indexed': true, 'name': 'reverse', 'type': 'address' }], 'name': 'ReverseRemoved', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'old', 'type': 'address' }, { 'indexed': true, 'name': 'current', 'type': 'address' }], 'name': 'NewOwner', 'type': 'event' }]); - web3.eth.tokenReg = TokenReg.at(web3.eth.registry.lookupAddress('tokenreg', 'A')); - } - }) - .catch((error) => { - console.error(error); - }); + web3.eth.registry = Registry.at(web3.eth.registryAddress()); + web3.eth.registry.lookup = (name, field) => web3.eth.registry.get(web3.sha3(name), field); + web3.eth.registry.lookupAddress = (name, field) => web3.eth.registry.getAddress(web3.sha3(name), field); + web3.eth.registry.lookupUint = (name, field) => web3.eth.registry.getUint(web3.sha3(name), field); + + let TokenReg = web3.eth.contract([{ 'constant': true, 'inputs': [{ 'name': '_id', 'type': 'uint256' }], 'name': 'token', 'outputs': [{ 'name': 'addr', 'type': 'address' }, { 'name': 'tla', 'type': 'string' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_new', 'type': 'address' }], 'name': 'setOwner', 'outputs': [], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_addr', 'type': 'address' }, { 'name': '_tla', 'type': 'string' }, { 'name': '_base', 'type': 'uint256' }, { 'name': '_name', 'type': 'string' }], 'name': 'register', 'outputs': [{ 'name': '', 'type': 'bool' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_fee', 'type': 'uint256' }], 'name': 'setFee', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_id', 'type': 'uint256' }, { 'name': '_key', 'type': 'bytes32' }], 'name': 'meta', 'outputs': [{ 'name': '', 'type': 'bytes32' }], 'type': 'function' 
}, { 'constant': false, 'inputs': [{ 'name': '_addr', 'type': 'address' }, { 'name': '_tla', 'type': 'string' }, { 'name': '_base', 'type': 'uint256' }, { 'name': '_name', 'type': 'string' }, { 'name': '_owner', 'type': 'address' }], 'name': 'registerAs', 'outputs': [{ 'name': '', 'type': 'bool' }], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_tla', 'type': 'string' }], 'name': 'fromTLA', 'outputs': [{ 'name': 'id', 'type': 'uint256' }, { 'name': 'addr', 'type': 'address' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'owner', 'outputs': [{ 'name': '', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [], 'name': 'drain', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'tokenCount', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_id', 'type': 'uint256' }], 'name': 'unregister', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [{ 'name': '_addr', 'type': 'address' }], 'name': 'fromAddress', 'outputs': [{ 'name': 'id', 'type': 'uint256' }, { 'name': 'tla', 'type': 'string' }, { 'name': 'base', 'type': 'uint256' }, { 'name': 'name', 'type': 'string' }, { 'name': 'owner', 'type': 'address' }], 'type': 'function' }, { 'constant': false, 'inputs': [{ 'name': '_id', 'type': 'uint256' }, { 'name': '_key', 'type': 'bytes32' }, { 'name': '_value', 'type': 'bytes32' }], 'name': 'setMeta', 'outputs': [], 'type': 'function' }, { 'constant': true, 'inputs': [], 'name': 'fee', 'outputs': [{ 'name': '', 'type': 'uint256' }], 'type': 'function' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'tla', 'type': 'string' }, { 'indexed': true, 'name': 'id', 'type': 'uint256' }, { 'indexed': false, 'name': 'addr', 'type': 'address' }, { 'indexed': false, 'name': 'name', 'type': 'string' }], 'name': 'Registered', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'tla', 'type': 'string' }, { 'indexed': true, 'name': 'id', 'type': 'uint256' }], 'name': 'Unregistered', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'id', 'type': 'uint256' }, { 'indexed': true, 'name': 'key', 'type': 'bytes32' }, { 'indexed': false, 'name': 'value', 'type': 'bytes32' }], 'name': 'MetaChanged', 'type': 'event' }, { 'anonymous': false, 'inputs': [{ 'indexed': true, 'name': 'old', 'type': 'address' }, { 'indexed': true, 'name': 'current', 'type': 'address' }], 'name': 'NewOwner', 'type': 'event' }]); + + web3.eth.tokenReg = TokenReg.at(web3.eth.registry.lookupAddress('tokenreg', 'A')); + } +}) +.catch((error) => { + console.error(error); +}); window.api = api; window.web3 = web3; diff --git a/js/src/dapps/registry/ui/image.js b/js/src/dapps/registry/ui/image.js index 3f0a90abe..88cae4e30 100644 --- a/js/src/dapps/registry/ui/image.js +++ b/js/src/dapps/registry/ui/image.js @@ -16,8 +16,6 @@ import React from 'react'; -import { parityNode } from '../../../environment'; - const styles = { padding: '.5em', border: '1px solid #777' @@ -34,7 +32,7 @@ export default (address) => { return ( { diff --git a/js/src/dapps/tokenreg/Tokens/Token/token.js b/js/src/dapps/tokenreg/Tokens/Token/token.js index 5ea50535c..9d60d4580 100644 --- a/js/src/dapps/tokenreg/Tokens/Token/token.js +++ b/js/src/dapps/tokenreg/Tokens/Token/token.js @@ -30,7 +30,6 @@ import styles from './token.css'; 
import { metaDataKeys } from '../../constants'; import { api } from '../../parity'; -import { parityNode } from '../../../../environment'; export default class Token extends Component { static propTypes = { @@ -312,7 +311,7 @@ export default class Token extends Component { meta-data:

-    [img element whose `src` was prefixed with `${parityNode}`]
+    [img element whose `src` is now a relative `/api/content/…` path]
); diff --git a/js/src/embed.js b/js/src/embed.js index 218bb2f4f..56532257c 100644 --- a/js/src/embed.js +++ b/js/src/embed.js @@ -55,6 +55,9 @@ class FakeTransport { return Promise.reject('not connected'); } + addMiddleware () { + } + on () { } } diff --git a/js/src/environment/index.js b/js/src/environment/index.js index 1123ddd9b..8eaa6519d 100644 --- a/js/src/environment/index.js +++ b/js/src/environment/index.js @@ -19,14 +19,4 @@ import './tests'; -const parityNode = ( - process.env.PARITY_URL && `http://${process.env.PARITY_URL}` - ) || ( - process.env.NODE_ENV === 'production' - ? 'http://127.0.0.1:8545' - : '' - ); - -export { - parityNode -}; +export {}; diff --git a/js/src/index.js b/js/src/index.js index 1436f30c0..9a5c34d3e 100644 --- a/js/src/index.js +++ b/js/src/index.js @@ -53,8 +53,7 @@ if (process.env.NODE_ENV === 'development') { } const AUTH_HASH = '#/auth?'; -const parityUrl = process.env.PARITY_URL || window.location.host; -const urlScheme = window.location.href.match(/^https/) ? 'wss://' : 'ws://'; +const parityUrl = process.env.PARITY_URL || '127.0.0.1:8546'; let token = null; @@ -62,7 +61,7 @@ if (window.location.hash && window.location.hash.indexOf(AUTH_HASH) === 0) { token = qs.parse(window.location.hash.substr(AUTH_HASH.length)).token; } -const api = new SecureApi(`${urlScheme}${parityUrl}`, token); +const api = new SecureApi(parityUrl, token); patchApi(api); loadSender(api); diff --git a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index 6ab5ea212..326ca9831 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -143,25 +143,34 @@ export default { } }, - dappsPort: { - section: SECTION_NODE, - desc: 'Returns the port the dapps are running on, error if not enabled.', + dappsList: { + subdoc: SUBDOC_SET, + desc: 'Returns a list of available local dapps.', params: [], returns: { - type: Quantity, - desc: 'The port number', - example: 8080 + type: Array, + desc: 'The list of dapps', + example: [ + { + author: 'Parity Technologies Ltd', + description: 'A skeleton dapp', + iconUrl: 'title.png', + id: 'skeleton', + name: 'Skeleton', + version: '0.1' + } + ] } }, - dappsInterface: { + dappsUrl: { section: SECTION_NODE, - desc: 'Returns the interface the dapps are running on, error if not enabled.', + desc: 'Returns the hostname and the port of dapps/rpc server, error if not enabled.', params: [], returns: { type: String, - desc: 'The interface', - example: '127.0.0.1' + desc: 'The hostname and port number', + example: 'localhost:8545' } }, @@ -788,17 +797,6 @@ export default { } }, - signerPort: { - section: SECTION_NODE, - desc: 'Returns the port the signer is running on, error if not enabled', - params: [], - returns: { - type: Quantity, - desc: 'The port number', - example: 8180 - } - }, - transactionsLimit: { section: SECTION_MINING, desc: 'Changes limit for transactions in queue.', @@ -1916,6 +1914,17 @@ export default { } }, + wsUrl: { + section: SECTION_NODE, + desc: 'Returns the hostname and the port of WebSockets/Signer server, error if not enabled.', + params: [], + returns: { + type: String, + desc: 'The hostname and port number', + example: 'localhost:8546' + } + }, + composeTransaction: { desc: 'Given partial transaction request produces transaction with all fields filled in. 
Such transaction can be then signed externally.', params: [ @@ -1997,4 +2006,5 @@ export default { example: 'QmSbFjqjd6nFwNHqsBCC7SK8GShGcayLUEtysJjNGhZAnC' } } + }; diff --git a/js/src/secureApi.js b/js/src/secureApi.js index e19d7ae99..2fd33fb9b 100644 --- a/js/src/secureApi.js +++ b/js/src/secureApi.js @@ -27,21 +27,28 @@ export default class SecureApi extends Api { _needsToken = false; _tokens = []; - _dappsInterface = null; - _dappsPort = 8545; - _signerPort = 8180; + _dappsUrl = null; + _wsUrl = null; - static getTransport (url, sysuiToken) { - return new Api.Transport.Ws(url, sysuiToken, false); + static getTransport (url, sysuiToken, protocol) { + const proto = protocol() === 'https:' ? 'wss:' : 'ws:'; + + return new Api.Transport.Ws(`${proto}//${url}`, sysuiToken, false); } - constructor (url, nextToken, getTransport = SecureApi.getTransport) { + // Returns a protocol with `:` at the end. + static protocol () { + return window.location.protocol; + } + + constructor (url, nextToken, getTransport = SecureApi.getTransport, protocol = SecureApi.protocol) { const sysuiToken = store.get('sysuiToken'); - const transport = getTransport(url, sysuiToken); + const transport = getTransport(url, sysuiToken, protocol); super(transport); - this._url = url; + this._wsUrl = url; + this.protocol = protocol; // Try tokens from localStorage, from hash and 'initial' this._tokens = uniq([sysuiToken, nextToken, 'initial']) .filter((token) => token) @@ -53,12 +60,30 @@ export default class SecureApi extends Api { this.connect(); } + get _dappsAddress () { + if (!this._dappsUrl) { + return { + host: null, + port: 8545 + }; + } + + const [host, port] = this._dappsUrl.split(':'); + + return { + host, + port: parseInt(port, 10) + }; + } + get dappsPort () { - return this._dappsPort; + return this._dappsAddress.port; } get dappsUrl () { - return `http://${this.hostname}:${this.dappsPort}`; + const { port } = this._dappsAddress; + + return `${this.protocol()}//${this.hostname}:${port}`; } get hostname () { @@ -66,15 +91,13 @@ export default class SecureApi extends Api { return 'dapps.parity'; } - if (!this._dappsInterface || this._dappsInterface === '0.0.0.0') { + const { host } = this._dappsAddress; + + if (!host || host === '0.0.0.0') { return window.location.hostname; } - return this._dappsInterface; - } - - get signerPort () { - return this._signerPort; + return host; } get isConnecting () { @@ -98,18 +121,18 @@ export default class SecureApi extends Api { * (`signerPort`, `dappsInterface`, `dappsPort`, ...) 
*/ configure (configuration) { - const { dappsInterface, dappsPort, signerPort } = configuration; + const { dappsInterface, dappsPort, signerPort, wsPort } = configuration; if (dappsInterface) { - this._dappsInterface = dappsInterface; + this._dappsUrl = `${dappsInterface}:${this._dappsAddress.port}`; } if (dappsPort) { - this._dappsPort = dappsPort; + this._dappsUrl = `${this.hostname}:${dappsPort}`; } - if (signerPort) { - this._signerPort = signerPort; + if (signerPort || wsPort) { + this._wsUrl = `${this.hostname}:${signerPort || wsPort}`; } } @@ -166,9 +189,7 @@ export default class SecureApi extends Api { * otherwise (HEAD request to the Node) */ isNodeUp () { - const url = this._url.replace(/wss?/, 'http'); - - return fetch(url, { method: 'HEAD' }) + return fetch(`${this.protocol()}//${this._wsUrl}`, { method: 'HEAD', mode: 'no-cors' }) .then( (r) => r.status === 200, () => false @@ -297,14 +318,12 @@ export default class SecureApi extends Api { _fetchSettings () { return Promise .all([ - this.parity.dappsPort(), - this.parity.dappsInterface(), - this.parity.signerPort() + this.parity.dappsUrl(), + this.parity.wsUrl() ]) - .then(([dappsPort, dappsInterface, signerPort]) => { - this._dappsPort = dappsPort.toNumber(); - this._dappsInterface = dappsInterface; - this._signerPort = signerPort.toNumber(); + .then(([dappsUrl, wsUrl]) => { + this._dappsUrl = dappsUrl; + this._wsUrl = dappsUrl; }); } diff --git a/js/src/util/dapps.js b/js/src/util/dapps.js index 2ca416e1a..58b33e49e 100644 --- a/js/src/util/dapps.js +++ b/js/src/util/dapps.js @@ -25,21 +25,6 @@ import builtinJson from '~/views/Dapps/builtin.json'; const builtinApps = builtinJson.filter((app) => app.id); -function getHost (api) { - const host = process.env.DAPPS_URL || - ( - process.env.NODE_ENV === 'production' - ? api.dappsUrl - : '' - ); - - if (host === '/') { - return ''; - } - - return host; -} - export function subscribeToChanges (api, dappReg, callback) { return dappReg .getContract() @@ -105,12 +90,7 @@ export function fetchBuiltinApps () { } export function fetchLocalApps (api) { - return fetch(`${getHost(api)}/api/apps`) - .then((response) => { - return response.ok - ? 
response.json() - : []; - }) + return api.parity.dappsList() .then((apps) => { return apps .map((app) => { @@ -195,7 +175,7 @@ export function fetchManifest (api, manifestHash) { } return fetch( - `${getHost(api)}/api/content/${manifestHash}/`, + `/api/content/${manifestHash}/`, { redirect: 'follow', mode: 'cors' } ) .then((response) => { diff --git a/js/src/views/Dapps/dappStore.spec.js b/js/src/views/Dapps/dappStore.spec.js index b08e1f1dc..3b4fb0ded 100644 --- a/js/src/views/Dapps/dappStore.spec.js +++ b/js/src/views/Dapps/dappStore.spec.js @@ -26,17 +26,11 @@ const APPID_DAPPREG = '0x7bbc4f1a27628781b96213e781a1b8eec6982c1db8fac739af6e4c5 const APPID_GHH = '0x058740ee9a5a3fb9f1cfa10752baec87e09cc45cd7027fd54708271aca300c75'; const APPID_LOCALTX = '0xae74ad174b95cdbd01c88ac5b73a296d33e9088fc2a200e76bcedf3a94a7815d'; const APPID_TOKENDEPLOY = '0xf9f2d620c2e08f83e45555247146c62185e4ab7cf82a4b9002a265a0d020348f'; -const FETCH_OK = { - ok: true, - status: 200 -}; let globalContractsGet; -let globalFetch; function stubGlobals () { globalContractsGet = Contracts.get; - globalFetch = global.fetch; Contracts.get = () => { return { @@ -50,31 +44,21 @@ function stubGlobals () { } }; }; - - global.fetch = (url) => { - switch (url) { - case '/api/apps': - return Promise.resolve(Object.assign({}, FETCH_OK, { - json: sinon.stub().resolves([]) // TODO: Local stubs in here - })); - - default: - console.log('Unknown fetch stub endpoint', url); - return Promise.reject(); - } - }; } function restoreGlobals () { Contracts.get = globalContractsGet; - global.fetch = globalFetch; } let api; let store; function create () { - api = {}; + api = { + parity: { + dappsList: () => Promise.resolve([]) + } + }; store = new Store(api); return store; diff --git a/js/src/views/Web/store.spec.js b/js/src/views/Web/store.spec.js index 58b2f1b3c..9f7d2e777 100644 --- a/js/src/views/Web/store.spec.js +++ b/js/src/views/Web/store.spec.js @@ -34,8 +34,8 @@ let store; function createApi () { api = { - dappsPort: 8080, - dappsUrl: 'http://home.web3.site:8080', + dappsPort: 8545, + dappsUrl: 'http://home.web3.site:8545', parity: { listRecentDapps: sinon.stub().resolves(TEST_HISTORY) }, @@ -159,7 +159,7 @@ describe('views/Web/Store', () => { it('encodes current', () => { store.setCurrentUrl(TEST_URL1); expect(store.encodedPath).to.match( - /http:\/\/home\.web3\.site:8080\/web\/DSTPRV1BD1T78W1T5WQQ6VVDCMQ78SBKEGQ68VVDC5MPWBK3DXPG\?t=[0-9]*$/ + /http:\/\/home\.web3\.site:8545\/web\/DSTPRV1BD1T78W1T5WQQ6VVDCMQ78SBKEGQ68VVDC5MPWBK3DXPG\?t=[0-9]*$/ ); }); }); @@ -167,7 +167,7 @@ describe('views/Web/Store', () => { it('encodes current', () => { store.setCurrentUrl(TEST_URL1); expect(store.encodedUrl).to.match( - /^http:\/\/DSTPRV1BD1T78W1T5WQQ6VVDCMQ78SBKEGQ68VVDC5MPWBK3DXPG\.web\.web3\.site:8080\?t=[0-9]*$/ + /^http:\/\/DSTPRV1BD1T78W1T5WQQ6VVDCMQ78SBKEGQ68VVDC5MPWBK3DXPG\.web\.web3\.site:8545\?t=[0-9]*$/ ); }); }); diff --git a/js/webpack/build.server.js b/js/webpack/build.server.js index efc8a2cda..486209ccb 100644 --- a/js/webpack/build.server.js +++ b/js/webpack/build.server.js @@ -15,26 +15,21 @@ // along with Parity. If not, see . 
// test only /** - * Run `DAPPS_URL="/" PARITY_URL="127.0.0.1:8180" NODE_ENV="production" npm run build` + * Run `DAPPS_URL="/" PARITY_URL="127.0.0.1:8546" NODE_ENV="production" npm run build` * to build the project ; use this server to test that the minifed * version is working (this is a simple proxy server) */ var express = require('express'); -var proxy = require('http-proxy-middleware'); var Shared = require('./shared'); var app = express(); -var wsProxy = proxy('ws://127.0.0.1:8180', { changeOrigin: true }); Shared.addProxies(app); app.use(express.static('.build')); -app.use(wsProxy); var server = app.listen(process.env.PORT || 3000, function () { console.log('Listening on port', server.address().port); }); - -server.on('upgrade', wsProxy.upgrade); diff --git a/js/webpack/dev.server.js b/js/webpack/dev.server.js index 75ea7703a..038945014 100644 --- a/js/webpack/dev.server.js +++ b/js/webpack/dev.server.js @@ -22,7 +22,6 @@ const webpackHotMiddleware = require('webpack-hot-middleware'); const http = require('http'); const express = require('express'); const ProgressBar = require('progress'); -const proxy = require('http-proxy-middleware'); const webpackConfig = require('./app'); const Shared = require('./shared'); @@ -84,18 +83,13 @@ app.use(webpackDevMiddleware(compiler, { } })); -var wsProxy = proxy('ws://127.0.0.1:8180', { changeOrigin: true }); - // Add the dev proxies in the express App Shared.addProxies(app); app.use(express.static(webpackConfig.output.path)); -app.use(wsProxy); const server = http.createServer(app); server.listen(process.env.PORT || 3000, function () { console.log('Listening on port', server.address().port); progressBar = new ProgressBar('[:bar] :percent :etas', { total: 50 }); }); - -server.on('upgrade', wsProxy.upgrade); diff --git a/js/webpack/shared.js b/js/webpack/shared.js index 3e2eef8f1..ded064642 100644 --- a/js/webpack/shared.js +++ b/js/webpack/shared.js @@ -162,16 +162,8 @@ function getDappsEntry () { function addProxies (app) { const proxy = require('http-proxy-middleware'); - app.use(proxy((pathname, req) => { - return pathname === '/' && req.method === 'HEAD'; - }, { - target: 'http://127.0.0.1:8180', - changeOrigin: true, - autoRewrite: true - })); - app.use('/api', proxy({ - target: 'http://127.0.0.1:8545', + target: 'http://127.0.0.1:8180', changeOrigin: true, autoRewrite: true })); diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b5afb8d0a..cd8f14add 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -124,6 +124,8 @@ usage! { or |c: &Config| otry!(c.ui).port.clone(), flag_ui_interface: String = "local", or |c: &Config| otry!(c.ui).interface.clone(), + flag_ui_hosts: String = "none", + or |c: &Config| otry!(c.ui).hosts.as_ref().map(|vec| vec.join(",")), flag_ui_path: String = "$BASE/signer", or |c: &Config| otry!(c.ui).path.clone(), // NOTE [todr] For security reasons don't put this to config files @@ -188,7 +190,7 @@ usage! 
{ or |c: &Config| otry!(c.websockets).interface.clone(), flag_ws_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore", or |c: &Config| otry!(c.websockets).apis.as_ref().map(|vec| vec.join(",")), - flag_ws_origins: String = "none", + flag_ws_origins: String = "chrome-extension://*", or |c: &Config| otry!(c.websockets).origins.as_ref().map(|vec| vec.join(",")), flag_ws_hosts: String = "none", or |c: &Config| otry!(c.websockets).hosts.as_ref().map(|vec| vec.join(",")), @@ -430,6 +432,7 @@ struct Ui { disable: Option, port: Option, interface: Option, + hosts: Option>, path: Option, } @@ -709,6 +712,7 @@ mod tests { flag_no_ui: false, flag_ui_port: 8180u16, flag_ui_interface: "127.0.0.1".into(), + flag_ui_hosts: "none".into(), flag_ui_path: "$HOME/.parity/signer".into(), flag_ui_no_validation: false, @@ -929,6 +933,7 @@ mod tests { disable: Some(true), port: None, interface: None, + hosts: None, path: None, }), network: Some(Network { diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 34352450a..99f6c7304 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -110,6 +110,11 @@ UI Options: --ui-interface IP Specify the hostname portion of the Trusted UI server, IP should be an interface's IP address, or local (default: {flag_ui_interface}). + --ui-hosts HOSTS List of allowed Host header values. This option will + validate the Host header sent by the browser, it + is additional security against some attack + vectors. Special options: "all", "none", + (default: {flag_ui_hosts}). --ui-path PATH Specify directory where Trusted UIs tokens should be stored. (default: {flag_ui_path}) --ui-no-validation Disable Origin and Host headers validation for diff --git a/parity/configuration.rs b/parity/configuration.rs index 7985c9cd3..ad1353d09 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -30,7 +30,7 @@ use ethcore::client::{VMType}; use ethcore::miner::{MinerOptions, Banning, StratumOptions}; use ethcore::verification::queue::VerifierSettings; -use rpc::{IpcConfiguration, HttpConfiguration, WsConfiguration}; +use rpc::{IpcConfiguration, HttpConfiguration, WsConfiguration, UiConfiguration}; use rpc_apis::ApiSet; use parity_rpc::NetworkSettings; use cache::CacheConfig; @@ -41,7 +41,6 @@ use ethcore_logger::Config as LogConfig; use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path}; use dapps::Configuration as DappsConfiguration; use ipfs::Configuration as IpfsConfiguration; -use signer::{Configuration as SignerConfiguration}; use secretstore::Configuration as SecretStoreConfiguration; use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; use run::RunCmd; @@ -50,8 +49,6 @@ use presale::ImportWallet; use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts}; use snapshot::{self, SnapshotCommand}; -const AUTHCODE_FILENAME: &'static str = "authcodes"; - #[derive(Debug, PartialEq)] pub enum Cmd { Run(RunCmd), @@ -59,7 +56,7 @@ pub enum Cmd { Account(AccountCmd), ImportPresaleWallet(ImportWallet), Blockchain(BlockchainCmd), - SignerToken(SignerConfiguration), + SignerToken(WsConfiguration, UiConfiguration), SignerSign { id: Option, pwfile: Option, @@ -118,6 +115,7 @@ impl Configuration { let http_conf = self.http_config()?; let ipc_conf = self.ipc_config()?; let net_conf = self.net_config()?; + let ui_conf = self.ui_config(); let network_id = self.network_id(); let cache_config = self.cache_config(); let tracing = self.args.flag_tracing.parse()?; @@ -134,10 +132,8 @@ 
impl Configuration { let public_node = self.args.flag_public_node; let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; - let ui_address = self.ui_port().map(|port| (self.ui_interface(), port)); let mut dapps_conf = self.dapps_config(); let ipfs_conf = self.ipfs_config(); - let signer_conf = self.signer_config(); let secretstore_conf = self.secretstore_config()?; let format = self.format()?; @@ -149,11 +145,10 @@ impl Configuration { let cmd = if self.args.flag_version { Cmd::Version } else if self.args.cmd_signer { - let mut authfile = PathBuf::from(signer_conf.signer_path.clone()); - authfile.push(AUTHCODE_FILENAME); + let authfile = ::signer::codes_path(&ws_conf.signer_path); if self.args.cmd_new_token { - Cmd::SignerToken(signer_conf) + Cmd::SignerToken(ws_conf, ui_conf) } else if self.args.cmd_sign { let pwfile = self.args.flag_password.get(0).map(|pwfile| { PathBuf::from(pwfile) @@ -161,18 +156,18 @@ impl Configuration { Cmd::SignerSign { id: self.args.arg_id, pwfile: pwfile, - port: signer_conf.port, + port: ws_conf.port, authfile: authfile, } } else if self.args.cmd_reject { Cmd::SignerReject { id: self.args.arg_id, - port: signer_conf.port, + port: ws_conf.port, authfile: authfile, } } else if self.args.cmd_list { Cmd::SignerList { - port: signer_conf.port, + port: ws_conf.port, authfile: authfile, } } else { @@ -372,11 +367,10 @@ impl Configuration { warp_sync: warp_sync, public_node: public_node, geth_compatibility: geth_compatibility, - ui_address: ui_address, net_settings: self.network_settings()?, dapps_conf: dapps_conf, ipfs_conf: ipfs_conf, - signer_conf: signer_conf, + ui_conf: ui_conf, secretstore_conf: secretstore_conf, dapp: self.dapp_to_open()?, ui: self.args.cmd_ui, @@ -553,13 +547,12 @@ impl Configuration { Ok(options) } - fn signer_config(&self) -> SignerConfiguration { - SignerConfiguration { + fn ui_config(&self) -> UiConfiguration { + UiConfiguration { enabled: self.ui_enabled(), - port: self.args.flag_ports_shift + self.args.flag_ui_port, interface: self.ui_interface(), - signer_path: self.directories().signer, - skip_origin_validation: self.args.flag_unsafe_expose || self.args.flag_ui_no_validation, + port: self.args.flag_ports_shift + self.args.flag_ui_port, + hosts: self.ui_hosts(), } } @@ -768,6 +761,14 @@ impl Configuration { Some(hosts) } + fn ui_hosts(&self) -> Option> { + if self.args.flag_ui_no_validation { + return None; + } + + self.hosts(&self.args.flag_ui_hosts, &self.ui_interface()) + } + fn rpc_hosts(&self) -> Option> { self.hosts(&self.args.flag_jsonrpc_hosts, &self.rpc_interface()) } @@ -825,13 +826,17 @@ impl Configuration { } fn ws_config(&self) -> Result { + let ui = self.ui_config(); + let conf = WsConfiguration { enabled: self.ws_enabled(), interface: self.ws_interface(), port: self.args.flag_ports_shift + self.args.flag_ws_port, apis: self.args.flag_ws_apis.parse()?, hosts: self.ws_hosts(), - origins: self.ws_origins() + origins: self.ws_origins(), + signer_path: self.directories().signer.into(), + ui_address: ui.address(), }; Ok(conf) @@ -928,18 +933,6 @@ impl Configuration { } } - fn ui_port(&self) -> Option { - if !self.ui_enabled() { - None - } else { - Some(self.args.flag_ui_port) - } - } - - fn ui_interface(&self) -> String { - self.interface(&self.args.flag_ui_interface) - } - fn interface(&self, interface: &str) -> String { if self.args.flag_unsafe_expose { return "0.0.0.0".into(); @@ -952,6 +945,11 
@@ impl Configuration { }.into() } + + fn ui_interface(&self) -> String { + self.interface(&self.args.flag_ui_interface) + } + fn rpc_interface(&self) -> String { let rpc_interface = self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()); self.interface(&rpc_interface) @@ -1050,24 +1048,27 @@ impl Configuration { #[cfg(test)] mod tests { - use super::*; - use cli::Args; - use parity_rpc::NetworkSettings; - use ethcore::client::{VMType, BlockId}; - use ethcore::miner::{MinerOptions, PrioritizationStrategy}; - use helpers::{default_network_config}; - use run::RunCmd; - use dir::{Directories, default_hypervisor_path}; - use signer::{Configuration as SignerConfiguration}; - use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat, ExportState}; - use presale::ImportWallet; - use params::SpecType; - use account::{AccountCmd, NewAccount, ImportAccounts, ListAccounts}; - use devtools::{RandomTempPath}; - use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; use std::io::Write; use std::fs::{File, create_dir}; + use devtools::{RandomTempPath}; + use ethcore::client::{VMType, BlockId}; + use ethcore::miner::{MinerOptions, PrioritizationStrategy}; + use parity_rpc::NetworkSettings; + use updater::{UpdatePolicy, UpdateFilter, ReleaseTrack}; + + use account::{AccountCmd, NewAccount, ImportAccounts, ListAccounts}; + use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat, ExportState}; + use cli::Args; + use dir::{Directories, default_hypervisor_path}; + use helpers::{default_network_config}; + use params::SpecType; + use presale::ImportWallet; + use rpc::{WsConfiguration, UiConfiguration}; + use run::RunCmd; + + use super::*; + #[derive(Debug, PartialEq)] struct TestPasswordReader(&'static str); @@ -1233,12 +1234,20 @@ mod tests { let args = vec!["parity", "signer", "new-token"]; let conf = parse(&args); let expected = Directories::default().signer; - assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(SignerConfiguration { + assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(WsConfiguration { + enabled: true, + interface: "127.0.0.1".into(), + port: 8546, + apis: ApiSet::UnsafeContext, + origins: Some(vec!["chrome-extension://*".into()]), + hosts: Some(vec![]), + signer_path: expected.into(), + ui_address: Some(("127.0.0.1".to_owned(), 8180)), + }, UiConfiguration { enabled: true, - signer_path: expected, interface: "127.0.0.1".into(), port: 8180, - skip_origin_validation: false, + hosts: Some(vec![]), })); } @@ -1273,11 +1282,10 @@ mod tests { wal: true, vm_type: Default::default(), geth_compatibility: false, - ui_address: Some(("127.0.0.1".into(), 8180)), net_settings: Default::default(), dapps_conf: Default::default(), ipfs_conf: Default::default(), - signer_conf: Default::default(), + ui_conf: Default::default(), secretstore_conf: Default::default(), ui: false, dapp: None, @@ -1457,7 +1465,7 @@ mod tests { } #[test] - fn should_parse_signer_configration() { + fn should_parse_ui_configuration() { // given // when @@ -1467,33 +1475,33 @@ mod tests { let conf3 = parse(&["parity", "--ui-path", "signer", "--ui-interface", "test"]); // then - assert_eq!(conf0.signer_config(), SignerConfiguration { + assert_eq!(conf0.directories().signer, "signer".to_owned()); + assert_eq!(conf0.ui_config(), UiConfiguration { enabled: true, - port: 8180, interface: "127.0.0.1".into(), - signer_path: "signer".into(), - skip_origin_validation: false, - }); - assert_eq!(conf1.signer_config(), SignerConfiguration { - enabled: 
true, port: 8180, - interface: "127.0.0.1".into(), - signer_path: "signer".into(), - skip_origin_validation: true, + hosts: Some(vec![]), }); - assert_eq!(conf2.signer_config(), SignerConfiguration { + assert_eq!(conf1.directories().signer, "signer".to_owned()); + assert_eq!(conf1.ui_config(), UiConfiguration { enabled: true, + interface: "127.0.0.1".into(), + port: 8180, + hosts: None, + }); + assert_eq!(conf2.directories().signer, "signer".to_owned()); + assert_eq!(conf2.ui_config(), UiConfiguration { + enabled: true, + interface: "127.0.0.1".into(), port: 3123, - interface: "127.0.0.1".into(), - signer_path: "signer".into(), - skip_origin_validation: false, + hosts: Some(vec![]), }); - assert_eq!(conf3.signer_config(), SignerConfiguration { + assert_eq!(conf3.directories().signer, "signer".to_owned()); + assert_eq!(conf3.ui_config(), UiConfiguration { enabled: true, - port: 8180, interface: "test".into(), - signer_path: "signer".into(), - skip_origin_validation: false, + port: 8180, + hosts: Some(vec![]), }); } @@ -1551,7 +1559,7 @@ mod tests { assert_eq!(conf0.network_settings().unwrap().rpc_port, 8546); assert_eq!(conf0.http_config().unwrap().port, 8546); assert_eq!(conf0.ws_config().unwrap().port, 8547); - assert_eq!(conf0.signer_config().port, 8181); + assert_eq!(conf0.ui_config().port, 8181); assert_eq!(conf0.secretstore_config().unwrap().port, 8084); assert_eq!(conf0.secretstore_config().unwrap().http_port, 8083); assert_eq!(conf0.ipfs_config().port, 5002); @@ -1563,7 +1571,7 @@ mod tests { assert_eq!(conf1.network_settings().unwrap().rpc_port, 8545); assert_eq!(conf1.http_config().unwrap().port, 8545); assert_eq!(conf1.ws_config().unwrap().port, 8547); - assert_eq!(conf1.signer_config().port, 8181); + assert_eq!(conf1.ui_config().port, 8181); assert_eq!(conf1.secretstore_config().unwrap().port, 8084); assert_eq!(conf1.secretstore_config().unwrap().http_port, 8083); assert_eq!(conf1.ipfs_config().port, 5002); @@ -1582,8 +1590,8 @@ mod tests { assert_eq!(conf0.http_config().unwrap().hosts, None); assert_eq!(&conf0.ws_config().unwrap().interface, "0.0.0.0"); assert_eq!(conf0.ws_config().unwrap().hosts, None); - assert_eq!(&conf0.signer_config().interface, "0.0.0.0"); - assert_eq!(conf0.signer_config().skip_origin_validation, true); + assert_eq!(&conf0.ui_config().interface, "0.0.0.0"); + assert_eq!(conf0.ui_config().hosts, None); assert_eq!(&conf0.secretstore_config().unwrap().interface, "0.0.0.0"); assert_eq!(&conf0.secretstore_config().unwrap().http_interface, "0.0.0.0"); assert_eq!(&conf0.ipfs_config().interface, "0.0.0.0"); diff --git a/parity/dapps.rs b/parity/dapps.rs index 324e40403..f0ae06c6b 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -27,6 +27,7 @@ use hash_fetch::urlhint::ContractClient; use helpers::replace_home; use light::client::Client as LightClient; use light::on_demand::{self, OnDemand}; +use rpc; use rpc_apis::SignerService; use parity_reactor; use util::{Bytes, Address}; @@ -49,6 +50,15 @@ impl Default for Configuration { } } +impl Configuration { + pub fn address(&self, address: Option<(String, u16)>) -> Option<(String, u16)> { + match self.enabled { + true => address, + false => None, + } + } +} + /// Registrar implementation of the full client. pub struct FullRegistrar { /// Handle to the full client. @@ -125,35 +135,49 @@ impl ContractClient for LightRegistrar { // TODO: light client implementation forwarding to OnDemand and waiting for future // to resolve. 
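The `address` helper added to the dapps `Configuration` above, like the `address()` methods on the HTTP/UI/WS configurations later in this series, follows one convention: a server endpoint travels as `Option<(String, u16)>`, with `None` doubling as "disabled", so callers never need a separate `enabled` check. A minimal stand-alone sketch of the gating (free function for illustration only; in the patch it lives on the configuration types):

    fn dapps_address(enabled: bool, http_address: Option<(String, u16)>) -> Option<(String, u16)> {
        // Pass the HTTP server's address through only when the dapps server is enabled.
        match enabled {
            true => http_address,
            false => None,
        }
    }

    fn main() {
        let http = Some(("127.0.0.1".to_string(), 8545));
        // Mirrors `cmd.dapps_conf.address(cmd.http_conf.address())` used in run.rs below.
        assert_eq!(dapps_address(true, http.clone()), http);
        assert_eq!(dapps_address(false, http), None);
    }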
+#[derive(Clone)] pub struct Dependencies { pub sync_status: Arc, pub contract_client: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, + pub ui_address: Option<(String, u16)>, } -pub fn new(configuration: Configuration, deps: Dependencies) - -> Result, String> -{ +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { if !configuration.enabled { return Ok(None); } - dapps_middleware( + server::dapps_middleware( deps, configuration.dapps_path, configuration.extra_dapps, + rpc::DAPPS_DOMAIN.into(), ).map(Some) } -pub use self::server::{SyncStatus, Middleware, dapps_middleware}; +pub fn new_ui(enabled: bool, deps: Dependencies) -> Result, String> { + if !enabled { + return Ok(None); + } + + server::ui_middleware( + deps, + rpc::DAPPS_DOMAIN.into(), + ).map(Some) +} + +pub use self::server::{SyncStatus, Middleware, service}; #[cfg(not(feature = "dapps"))] mod server { use super::Dependencies; + use std::sync::Arc; use std::path::PathBuf; use parity_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction}; + use rpc_apis; pub type SyncStatus = Fn() -> bool; @@ -170,9 +194,21 @@ mod server { _deps: Dependencies, _dapps_path: PathBuf, _extra_dapps: Vec, + _dapps_domain: String, ) -> Result { Err("Your Parity version has been compiled without WebApps support.".into()) } + + pub fn ui_middleware( + _deps: Dependencies, + _dapps_domain: String, + ) -> Result { + Err("Your Parity version has been compiled without UI support.".into()) + } + + pub fn service(_: &Option) -> Option> { + None + } } #[cfg(feature = "dapps")] @@ -180,6 +216,7 @@ mod server { use super::Dependencies; use std::path::PathBuf; use std::sync::Arc; + use rpc_apis; use parity_dapps; use parity_reactor; @@ -191,20 +228,62 @@ mod server { deps: Dependencies, dapps_path: PathBuf, extra_dapps: Vec, + dapps_domain: String, ) -> Result { - let signer = deps.signer.clone(); + let signer = deps.signer; let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token)); - Ok(parity_dapps::Middleware::new( + Ok(parity_dapps::Middleware::dapps( parity_remote, - deps.signer.address(), + deps.ui_address, dapps_path, extra_dapps, + dapps_domain, deps.contract_client, deps.sync_status, web_proxy_tokens, - deps.fetch.clone(), + deps.fetch, )) } + + pub fn ui_middleware( + deps: Dependencies, + dapps_domain: String, + ) -> Result { + let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); + Ok(parity_dapps::Middleware::ui( + parity_remote, + deps.contract_client, + deps.sync_status, + deps.fetch, + dapps_domain, + )) + } + + pub fn service(middleware: &Option) -> Option> { + middleware.as_ref().map(|m| Arc::new(DappsServiceWrapper { + endpoints: m.endpoints() + }) as Arc) + } + + pub struct DappsServiceWrapper { + endpoints: parity_dapps::Endpoints, + } + + impl rpc_apis::DappsService for DappsServiceWrapper { + fn list_dapps(&self) -> Vec { + self.endpoints.list() + .into_iter() + .map(|app| rpc_apis::LocalDapp { + id: app.id, + name: app.name, + description: app.description, + version: app.version, + author: app.author, + icon_url: app.icon_url, + }) + .collect() + } + } } diff --git a/parity/main.rs b/parity/main.rs index c61d414be..c05c9ba59 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -51,7 +51,6 @@ extern crate ethcore_ipc_hypervisor as hypervisor; extern crate ethcore_ipc_nano as nanoipc; extern crate ethcore_light as light; extern crate ethcore_logger; -extern 
crate ethcore_signer; extern crate ethcore_util as util; extern crate ethkey; extern crate ethsync; @@ -114,9 +113,9 @@ mod presale; mod rpc; mod rpc_apis; mod run; +mod secretstore; mod signer; mod snapshot; -mod secretstore; mod upgrade; mod url; mod user_defaults; @@ -170,7 +169,7 @@ fn execute(command: Execute, can_restart: bool) -> Result account::execute(account_cmd).map(|s| PostExecutionAction::Print(s)), Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd).map(|s| PostExecutionAction::Print(s)), Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd).map(|_| PostExecutionAction::Quit), - Cmd::SignerToken(signer_cmd) => signer::execute(signer_cmd).map(|s| PostExecutionAction::Print(s)), + Cmd::SignerToken(ws_conf, ui_conf) => signer::execute(ws_conf, ui_conf).map(|s| PostExecutionAction::Print(s)), Cmd::SignerSign { id, pwfile, port, authfile } => rpc_cli::signer_sign(id, pwfile, port, authfile).map(|s| PostExecutionAction::Print(s)), Cmd::SignerList { port, authfile } => rpc_cli::signer_list(port, authfile).map(|s| PostExecutionAction::Print(s)), Cmd::SignerReject { id, port, authfile } => rpc_cli::signer_reject(id, port, authfile).map(|s| PostExecutionAction::Print(s)), diff --git a/parity/rpc.rs b/parity/rpc.rs index ae0e08858..66dcbcaf7 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -16,18 +16,24 @@ use std::io; use std::sync::Arc; +use std::path::PathBuf; +use std::collections::HashSet; use dapps; -use parity_rpc::informant::{RpcStats, Middleware}; -use parity_rpc::{self as rpc, HttpServerError, Metadata, Origin, DomainsValidation}; -use helpers::parity_ipc_path; +use dir::default_data_path; +use helpers::{parity_ipc_path, replace_home}; use jsonrpc_core::MetaIoHandler; use parity_reactor::TokioRemote; +use parity_rpc::informant::{RpcStats, Middleware}; +use parity_rpc::{self as rpc, Metadata, DomainsValidation}; use rpc_apis::{self, ApiSet}; pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware}; pub use parity_rpc::ws::Server as WsServer; + +pub const DAPPS_DOMAIN: &'static str = "web3.site"; + #[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, @@ -39,6 +45,15 @@ pub struct HttpConfiguration { pub threads: Option, } +impl HttpConfiguration { + pub fn address(&self) -> Option<(String, u16)> { + match self.enabled { + true => Some((self.interface.clone(), self.port)), + false => None, + } + } +} + impl Default for HttpConfiguration { fn default() -> Self { HttpConfiguration { @@ -53,6 +68,48 @@ impl Default for HttpConfiguration { } } +#[derive(Debug, PartialEq, Clone)] +pub struct UiConfiguration { + pub enabled: bool, + pub interface: String, + pub port: u16, + pub hosts: Option>, +} + +impl UiConfiguration { + pub fn address(&self) -> Option<(String, u16)> { + match self.enabled { + true => Some((self.interface.clone(), self.port)), + false => None, + } + } +} + +impl From for HttpConfiguration { + fn from(conf: UiConfiguration) -> Self { + HttpConfiguration { + enabled: conf.enabled, + interface: conf.interface, + port: conf.port, + apis: rpc_apis::ApiSet::SafeContext, + cors: None, + hosts: conf.hosts, + threads: None, + } + } +} + +impl Default for UiConfiguration { + fn default() -> Self { + UiConfiguration { + enabled: true, + port: 8180, + interface: "127.0.0.1".into(), + hosts: Some(vec![]), + } + } +} + #[derive(Debug, PartialEq)] pub struct IpcConfiguration { pub enabled: bool, @@ -75,7 +132,7 @@ impl Default for IpcConfiguration { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, 
PartialEq)] pub struct WsConfiguration { pub enabled: bool, pub interface: String, @@ -83,17 +140,32 @@ pub struct WsConfiguration { pub apis: ApiSet, pub origins: Option>, pub hosts: Option>, + pub signer_path: PathBuf, + pub ui_address: Option<(String, u16)>, } impl Default for WsConfiguration { fn default() -> Self { + let data_dir = default_data_path(); WsConfiguration { enabled: true, interface: "127.0.0.1".into(), port: 8546, apis: ApiSet::UnsafeContext, - origins: Some(Vec::new()), + origins: Some(vec!["chrome-extension://*".into()]), hosts: Some(Vec::new()), + signer_path: replace_home(&data_dir, "$BASE/signer").into(), + ui_address: Some(("127.0.0.1".to_owned(), 8180)), + } + } +} + + +impl WsConfiguration { + pub fn address(&self) -> Option<(String, u16)> { + match self.enabled { + true => Some((self.interface.clone(), self.port)), + false => None, } } } @@ -104,62 +176,6 @@ pub struct Dependencies { pub stats: Arc, } -pub struct RpcExtractor; -impl rpc::HttpMetaExtractor for RpcExtractor { - type Metadata = Metadata; - - fn read_metadata(&self, origin: String, dapps_origin: Option) -> Metadata { - let mut metadata = Metadata::default(); - - metadata.origin = match (origin.as_str(), dapps_origin) { - ("null", Some(dapp)) => Origin::Dapps(dapp.into()), - _ => Origin::Rpc(origin), - }; - - metadata - } -} - -impl rpc::IpcMetaExtractor for RpcExtractor { - fn extract(&self, _req: &rpc::IpcRequestContext) -> Metadata { - let mut metadata = Metadata::default(); - // TODO [ToDr] Extract proper session id when it's available in context. - metadata.origin = Origin::Ipc(1.into()); - metadata - } -} - -struct WsRpcExtractor; -impl rpc::ws::MetaExtractor for WsRpcExtractor { - fn extract(&self, req: &rpc::ws::RequestContext) -> Metadata { - let mut metadata = Metadata::default(); - let id = req.session_id as u64; - metadata.origin = Origin::Ws(id.into()); - metadata.session = Some(Arc::new(rpc::PubSubSession::new(req.sender()))); - metadata - } -} - -struct WsStats { - stats: Arc, -} - -impl rpc::ws::SessionStats for WsStats { - fn open_session(&self, _id: rpc::ws::SessionId) { - self.stats.open_session() - } - - fn close_session(&self, _id: rpc::ws::SessionId) { - self.stats.close_session() - } -} - -fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> - where D: rpc_apis::Dependencies -{ - rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis) -} - pub fn new_ws( conf: WsConfiguration, deps: &Dependencies, @@ -168,23 +184,41 @@ pub fn new_ws( return Ok(None); } - let url = format!("{}:{}", conf.interface, conf.port); + let domain = DAPPS_DOMAIN; + let ws_address = (conf.interface, conf.port); + let url = format!("{}:{}", ws_address.0, ws_address.1); let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?; - let handler = setup_apis(conf.apis, deps); - let remote = deps.remote.clone(); - let allowed_origins = into_domains(conf.origins); - let allowed_hosts = into_domains(conf.hosts); + + let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps); + let handler = { + let mut handler = MetaIoHandler::with_middleware(( + rpc::WsDispatcher::new(full_handler), + Middleware::new(deps.stats.clone(), deps.apis.activity_notifier()) + )); + let apis = conf.apis.list_apis().into_iter().collect::>(); + deps.apis.extend_with_set(&mut handler, &apis); + + handler + }; + + let remote = deps.remote.clone(); + let ui_address = conf.ui_address.clone(); + let allowed_origins = into_domains(with_domain(conf.origins, domain, &[ui_address])); 
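Both the origin and the host whitelists are widened here by the `with_domain` helper introduced further down in this file. A self-contained sketch of what it admits for the default UI address 127.0.0.1:8180 (the body mirrors the helper added below; only the explicit type annotations and the example are filled in):

    use std::collections::HashSet;

    fn with_domain(items: Option<Vec<String>>, domain: &str, addresses: &[Option<(String, u16)>]) -> Option<Vec<String>> {
        items.map(move |items| {
            let mut items = items.into_iter().collect::<HashSet<String>>();
            for address in addresses {
                if let Some((host, port)) = address.clone() {
                    // The literal address, a localhost alias, and the wildcard dapps domain.
                    items.insert(format!("{}:{}", host, port));
                    items.insert(format!("{}:{}", host.replace("127.0.0.1", "localhost"), port));
                    items.insert(format!("http://*.{}:{}", domain, port));
                    items.insert(format!("http://*.{}", domain)); // proxy PAC
                }
            }
            items.into_iter().collect()
        })
    }

    fn main() {
        let origins = with_domain(
            Some(vec!["chrome-extension://*".into()]),
            "web3.site",
            &[Some(("127.0.0.1".into(), 8180))],
        );
        // Expected entries: the explicit extension origin plus "127.0.0.1:8180",
        // "localhost:8180", "http://*.web3.site:8180" and "http://*.web3.site".
        println!("{:?}", origins);
        // A `None` whitelist is preserved as-is, which the surrounding code treats as "no validation".
        assert_eq!(with_domain(None, "web3.site", &[]), None);
    }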
+ let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &[Some(ws_address)])); + + let signer_path = conf.signer_path; + let signer_path = conf.ui_address.map(move |_| ::signer::codes_path(&signer_path)); + let path = signer_path.as_ref().map(|p| p.as_path()); let start_result = rpc::start_ws( &addr, handler, remote.clone(), allowed_origins, allowed_hosts, - WsRpcExtractor, - WsStats { - stats: deps.stats.clone(), - }, + rpc::WsExtractor::new(path.clone()), + rpc::WsExtractor::new(path.clone()), + rpc::WsStats::new(deps.stats.clone()), ); match start_result { @@ -197,21 +231,25 @@ pub fn new_ws( } pub fn new_http( + id: &str, + options: &str, conf: HttpConfiguration, deps: &Dependencies, - middleware: Option + middleware: Option, ) -> Result, String> { if !conf.enabled { return Ok(None); } - let url = format!("{}:{}", conf.interface, conf.port); - let addr = url.parse().map_err(|_| format!("Invalid HTTP JSON-RPC listen host/port given: {}", url))?; + let domain = DAPPS_DOMAIN; + let http_address = (conf.interface, conf.port); + let url = format!("{}:{}", http_address.0, http_address.1); + let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?; let handler = setup_apis(conf.apis, deps); let remote = deps.remote.clone(); let cors_domains = into_domains(conf.cors); - let allowed_hosts = into_domains(conf.hosts); + let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &[Some(http_address)])); let start_result = rpc::start_http( &addr, @@ -219,7 +257,7 @@ pub fn new_http( allowed_hosts, handler, remote, - RpcExtractor, + rpc::RpcExtractor, match (conf.threads, middleware) { (Some(threads), None) => rpc::HttpSettings::Threads(threads), (None, middleware) => rpc::HttpSettings::Dapps(middleware), @@ -231,17 +269,13 @@ pub fn new_http( match start_result { Ok(server) => Ok(Some(server)), - Err(HttpServerError::Io(ref err)) if err.kind() == io::ErrorKind::AddrInUse => Err( - format!("HTTP address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url) + Err(rpc::HttpServerError::Io(ref err)) if err.kind() == io::ErrorKind::AddrInUse => Err( + format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options) ), - Err(e) => Err(format!("HTTP error: {:?}", e)), + Err(e) => Err(format!("{} error: {:?}", id, e)), } } -fn into_domains>(items: Option>) -> DomainsValidation { - items.map(|vals| vals.into_iter().map(T::from).collect()).into() -} - pub fn new_ipc( conf: IpcConfiguration, dependencies: &Dependencies @@ -252,48 +286,39 @@ pub fn new_ipc( let handler = setup_apis(conf.apis, dependencies); let remote = dependencies.remote.clone(); - let ipc = rpc::start_ipc( - &conf.socket_addr, - handler, - remote, - RpcExtractor, - ); - - match ipc { + match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) { Ok(server) => Ok(Some(server)), Err(io_error) => Err(format!("IPC error: {}", io_error)), } } -#[cfg(test)] -mod tests { - use super::RpcExtractor; - use parity_rpc::{HttpMetaExtractor, Origin}; - - #[test] - fn should_extract_rpc_origin() { - // given - let extractor = RpcExtractor; - - // when - let meta = extractor.read_metadata("http://parity.io".into(), None); - let meta1 = extractor.read_metadata("http://parity.io".into(), Some("ignored".into())); - - // then 
- assert_eq!(meta.origin, Origin::Rpc("http://parity.io".into())); - assert_eq!(meta1.origin, Origin::Rpc("http://parity.io".into())); - } - - #[test] - fn should_dapps_origin() { - // given - let extractor = RpcExtractor; - let dapp = "https://wallet.ethereum.org".to_owned(); - - // when - let meta = extractor.read_metadata("null".into(), Some(dapp.clone())); - - // then - assert_eq!(meta.origin, Origin::Dapps(dapp.into())); - } +fn into_domains>(items: Option>) -> DomainsValidation { + items.map(|vals| vals.into_iter().map(T::from).collect()).into() +} + +fn with_domain(items: Option>, domain: &str, addresses: &[Option<(String, u16)>]) -> Option> { + items.map(move |items| { + let mut items = items.into_iter().collect::>(); + for address in addresses { + if let Some((host, port)) = address.clone() { + items.insert(format!("{}:{}", host, port)); + items.insert(format!("{}:{}", host.replace("127.0.0.1", "localhost"), port)); + items.insert(format!("http://*.{}:{}", domain, port)); + items.insert(format!("http://*.{}", domain)); //proxypac + } + } + items.into_iter().collect() + }) +} + +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> + where D: rpc_apis::Dependencies +{ + let mut handler = MetaIoHandler::with_middleware( + Middleware::new(deps.stats.clone(), deps.apis.activity_notifier()) + ); + let apis = apis.list_apis().into_iter().collect::>(); + deps.apis.extend_with_set(&mut handler, &apis); + + handler } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index d456a0fff..78f9de03a 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -20,14 +20,15 @@ use std::collections::HashSet; use std::str::FromStr; use std::sync::{Arc, Weak}; -pub use parity_rpc::SignerService; +pub use parity_rpc::signer::SignerService; +pub use parity_rpc::dapps::{DappsService, LocalDapp}; use ethcore::account_provider::AccountProvider; use ethcore::client::Client; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use parity_rpc::{Metadata, NetworkSettings}; -use parity_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; +use parity_rpc::informant::{ActivityNotifier, ClientNotifier}; use parity_rpc::dispatch::{FullDispatcher, LightDispatcher}; use ethsync::{ManageNetwork, SyncProvider, LightSync}; use hash_fetch::fetch::Client as FetchClient; @@ -183,7 +184,11 @@ pub trait Dependencies { fn activity_notifier(&self) -> Self::Notifier; /// Extend the given I/O handler with endpoints for each API. - fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]); + fn extend_with_set( + &self, + handler: &mut MetaIoHandler, + apis: &[Api], + ) where S: core::Middleware; } /// RPC dependencies for a full node. @@ -201,19 +206,20 @@ pub struct FullDependencies { pub net_service: Arc, pub updater: Arc, pub geth_compatibility: bool, - pub dapps_interface: Option, - pub dapps_port: Option, + pub dapps_service: Option>, + pub dapps_address: Option<(String, u16)>, + pub ws_address: Option<(String, u16)>, pub fetch: FetchClient, pub remote: parity_reactor::Remote, } impl FullDependencies { - fn extend_api>( + fn extend_api( &self, - handler: &mut MetaIoHandler, + handler: &mut MetaIoHandler, apis: &[Api], for_generic_pubsub: bool, - ) { + ) where S: core::Middleware { use parity_rpc::v1::*; macro_rules! 
add_signing_methods { @@ -288,8 +294,8 @@ impl FullDependencies { self.logger.clone(), self.settings.clone(), signer, - self.dapps_interface.clone(), - self.dapps_port, + self.dapps_address.clone(), + self.ws_address.clone(), ).to_delegate()); if !for_generic_pubsub { @@ -312,6 +318,7 @@ impl FullDependencies { &self.miner, &self.updater, &self.net_service, + self.dapps_service.clone(), self.fetch.clone(), ).to_delegate()) }, @@ -339,7 +346,11 @@ impl Dependencies for FullDependencies { } } - fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + fn extend_with_set( + &self, + handler: &mut MetaIoHandler, + apis: &[Api], + ) where S: core::Middleware { self.extend_api(handler, apis, false) } } @@ -363,8 +374,9 @@ pub struct LightDependencies { pub on_demand: Arc<::light::on_demand::OnDemand>, pub cache: Arc>, pub transaction_queue: Arc>, - pub dapps_interface: Option, - pub dapps_port: Option, + pub dapps_service: Option>, + pub dapps_address: Option<(String, u16)>, + pub ws_address: Option<(String, u16)>, pub fetch: FetchClient, pub geth_compatibility: bool, pub remote: parity_reactor::Remote, @@ -457,8 +469,8 @@ impl LightDependencies { self.logger.clone(), self.settings.clone(), signer, - self.dapps_interface.clone(), - self.dapps_port, + self.dapps_address.clone(), + self.ws_address.clone(), ).to_delegate()); if !for_generic_pubsub { @@ -479,6 +491,7 @@ impl LightDependencies { Api::ParitySet => { handler.extend_with(light::ParitySetClient::new( self.sync.clone(), + self.dapps_service.clone(), self.fetch.clone(), ).to_delegate()) }, @@ -502,7 +515,12 @@ impl Dependencies for LightDependencies { type Notifier = LightClientNotifier; fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } - fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + + fn extend_with_set( + &self, + handler: &mut MetaIoHandler, + apis: &[Api], + ) where S: core::Middleware { self.extend_api(handler, apis, false) } } @@ -552,15 +570,6 @@ impl ApiSet { } } -pub fn setup_rpc(stats: Arc, deps: &D, apis: ApiSet) -> MetaIoHandler> { - let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, deps.activity_notifier())); - // it's turned into vector, cause ont of the cases requires &[] - let apis = apis.list_apis().into_iter().collect::>(); - deps.extend_with_set(&mut handler, &apis[..]); - - handler -} - #[cfg(test)] mod test { use super::{Api, ApiSet}; diff --git a/parity/run.rs b/parity/run.rs index 0f45e585c..352f11c5d 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -49,11 +49,11 @@ use cache::CacheConfig; use user_defaults::UserDefaults; use dapps; use ipfs; -use signer; -use secretstore; use modules; -use rpc_apis; use rpc; +use rpc_apis; +use secretstore; +use signer; use url; // how often to take periodic snapshots. 
@@ -99,11 +99,10 @@ pub struct RunCmd { pub wal: bool, pub vm_type: VMType, pub geth_compatibility: bool, - pub ui_address: Option<(String, u16)>, pub net_settings: NetworkSettings, pub dapps_conf: dapps::Configuration, pub ipfs_conf: ipfs::Configuration, - pub signer_conf: signer::Configuration, + pub ui_conf: rpc::UiConfiguration, pub secretstore_conf: secretstore::Configuration, pub dapp: Option, pub ui: bool, @@ -119,12 +118,12 @@ pub struct RunCmd { pub no_persistent_txqueue: bool, } -pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> { - if !signer_conf.enabled { +pub fn open_ui(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration) -> Result<(), String> { + if !ui_conf.enabled { return Err("Cannot use UI command with UI turned off.".into()) } - let token = signer::generate_token_and_url(signer_conf)?; + let token = signer::generate_token_and_url(ws_conf, ui_conf)?; // Open a browser url::open(&token.url); // Print a message @@ -195,7 +194,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?; // create dirs used by parity - cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?; + cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?; info!("Starting {}", Colour::White.bold().paint(version())); info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); @@ -267,31 +266,47 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> // prepare account provider let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); let rpc_stats = Arc::new(informant::RpcStats::default()); - let signer_path = cmd.signer_conf.signer_path.clone(); + + // the dapps server + let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.ui_conf)); + let dapps_deps = { + let contract_client = Arc::new(::dapps::LightRegistrar { + client: service.client().clone(), + sync: light_sync.clone(), + on_demand: on_demand.clone(), + }); + + let sync = light_sync.clone(); + dapps::Dependencies { + sync_status: Arc::new(move || sync.is_major_importing()), + contract_client: contract_client, + remote: event_loop.raw_remote(), + fetch: fetch.clone(), + signer: signer_service.clone(), + ui_address: cmd.ui_conf.address(), + } + }; + + let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?; + let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, dapps_deps)?; // start RPCs + let dapps_service = dapps::service(&dapps_middleware); let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { - signer_service: Arc::new(rpc_apis::SignerService::new(move || { - signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) - }, cmd.ui_address)), + signer_service: signer_service, client: service.client().clone(), sync: light_sync.clone(), net: light_sync.clone(), secret_store: account_provider, logger: logger, settings: Arc::new(cmd.net_settings), - on_demand: on_demand.clone(), + on_demand: on_demand, cache: cache, transaction_queue: txq, - dapps_interface: match cmd.dapps_conf.enabled { - true => Some(cmd.http_conf.interface.clone()), - false => None, - }, - dapps_port: match cmd.dapps_conf.enabled { - true => Some(cmd.http_conf.port), - false => None, - }, - fetch: fetch.clone(), + dapps_service: dapps_service, + dapps_address: 
cmd.dapps_conf.address(cmd.http_conf.address()), + ws_address: cmd.ws_conf.address(), + fetch: fetch, geth_compatibility: cmd.geth_compatibility, remote: event_loop.remote(), }); @@ -302,39 +317,11 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> stats: rpc_stats.clone(), }; - // the dapps server - let dapps_deps = { - let contract_client = Arc::new(::dapps::LightRegistrar { - client: service.client().clone(), - sync: light_sync.clone(), - on_demand: on_demand, - }); - - let sync = light_sync.clone(); - dapps::Dependencies { - sync_status: Arc::new(move || sync.is_major_importing()), - contract_client: contract_client, - remote: event_loop.raw_remote(), - fetch: fetch, - signer: deps_for_rpc_apis.signer_service.clone(), - } - }; - - let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; - // start rpc servers let _ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?; - let _http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + let _http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies, dapps_middleware)?; let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - - // the signer server - let signer_deps = signer::Dependencies { - apis: deps_for_rpc_apis.clone(), - remote: event_loop.raw_remote(), - rpc_stats: rpc_stats.clone(), - }; - let signing_queue = deps_for_rpc_apis.signer_service.queue(); - let _signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; + let _ui_server = rpc::new_http("Parity Wallet (UI)", "ui", cmd.ui_conf.clone().into(), &dependencies, ui_middleware)?; // minimal informant thread. Just prints block number every 5 seconds. // TODO: integrate with informant.rs @@ -351,9 +338,9 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running - let addr = format!("{}:{}", cmd.signer_conf.interface, cmd.signer_conf.port); + let addr = format!("{}:{}", cmd.ui_conf.interface, cmd.ui_conf.port); if !TcpListener::bind(&addr as &str).is_ok() { - return open_ui(&cmd.signer_conf).map(|_| (false, None)); + return open_ui(&cmd.ws_conf, &cmd.ui_conf).map(|_| (false, None)); } } @@ -408,7 +395,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?; // create dirs used by parity - cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?; + cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?; // run in daemon mode if let Some(pid_file) = cmd.daemon { @@ -620,16 +607,33 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // set up dependencies for rpc servers let rpc_stats = Arc::new(informant::RpcStats::default()); - let signer_path = cmd.signer_conf.signer_path.clone(); let secret_store = match cmd.public_node { true => None, false => Some(account_provider.clone()) }; + let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.ui_conf)); + + // the dapps server + let dapps_deps = { + let (sync, client) = (sync_provider.clone(), client.clone()); + let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); + + dapps::Dependencies { + sync_status: Arc::new(move || 
is_major_importing(Some(sync.status().state), client.queue_info())), + contract_client: contract_client, + remote: event_loop.raw_remote(), + fetch: fetch.clone(), + signer: signer_service.clone(), + ui_address: cmd.ui_conf.address(), + } + }; + let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?; + let ui_middleware = dapps::new_ui(cmd.ui_conf.enabled, dapps_deps)?; + + let dapps_service = dapps::service(&dapps_middleware); let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { - signer_service: Arc::new(rpc_apis::SignerService::new(move || { - signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) - }, cmd.ui_address)), + signer_service: signer_service, snapshot: snapshot_service.clone(), client: client.clone(), sync: sync_provider.clone(), @@ -642,14 +646,9 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R net_service: manage_network.clone(), updater: updater.clone(), geth_compatibility: cmd.geth_compatibility, - dapps_interface: match cmd.dapps_conf.enabled { - true => Some(cmd.http_conf.interface.clone()), - false => None, - }, - dapps_port: match cmd.dapps_conf.enabled { - true => Some(cmd.http_conf.port), - false => None, - }, + dapps_service: dapps_service, + dapps_address: cmd.dapps_conf.address(cmd.http_conf.address()), + ws_address: cmd.ws_conf.address(), fetch: fetch.clone(), remote: event_loop.remote(), }); @@ -660,34 +659,12 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R stats: rpc_stats.clone(), }; - // the dapps server - let dapps_deps = { - let (sync, client) = (sync_provider.clone(), client.clone()); - let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); - - dapps::Dependencies { - sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())), - contract_client: contract_client, - remote: event_loop.raw_remote(), - fetch: fetch.clone(), - signer: deps_for_rpc_apis.signer_service.clone(), - } - }; - let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; - // start rpc servers - let ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?; - let http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + let ws_server = rpc::new_ws(cmd.ws_conf.clone(), &dependencies)?; let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - - // the signer server - let signer_deps = signer::Dependencies { - apis: deps_for_rpc_apis.clone(), - remote: event_loop.raw_remote(), - rpc_stats: rpc_stats.clone(), - }; - let signing_queue = deps_for_rpc_apis.signer_service.queue(); - let signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; + let http_server = rpc::new_http("HTTP JSON-RPC", "jsonrpc", cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + // the ui server + let ui_server = rpc::new_http("UI WALLET", "ui", cmd.ui_conf.clone().into(), &dependencies, ui_middleware)?; // secret store key server let secretstore_deps = secretstore::Dependencies { @@ -746,7 +723,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // start ui if cmd.ui { - open_ui(&cmd.signer_conf)?; + open_ui(&cmd.ws_conf, &cmd.ui_conf)?; } if let Some(dapp) = cmd.dapp { @@ -756,11 +733,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // Handle exit let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart); - // drop this stuff as soon as exit detected. 
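// Editor's note: std-only sketch of the sharing pattern used in the run.rs wiring
// above, where one SignerService (a stand-in `Service` type here) is created once
// via signer::new_service, wrapped in an Arc, cloned into the dapps dependencies
// and moved into the RPC dependencies, so the UI middleware and the signer RPC
// observe the same confirmation queue. All type names below are placeholders.
use std::sync::Arc;

struct Service { name: &'static str }

struct DappsDeps { signer: Arc<Service> }
struct RpcDeps   { signer_service: Arc<Service> }

fn main() {
    let signer_service = Arc::new(Service { name: "signer" });

    let dapps_deps = DappsDeps { signer: signer_service.clone() };
    let rpc_deps = RpcDeps { signer_service: signer_service };

    // Both dependency structs point at the same allocation.
    assert!(Arc::ptr_eq(&dapps_deps.signer, &rpc_deps.signer_service));
    println!("shared service: {}", rpc_deps.signer_service.name);
}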
- drop((ws_server, http_server, ipc_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); - info!("Finishing work, please wait..."); + // drop this stuff as soon as exit detected. + drop((ws_server, http_server, ipc_server, ui_server, secretstore_key_server, ipfs_server, event_loop)); + // to make sure timer does not spawn requests while shutdown is in progress informant.shutdown(); // just Arc is dropping here, to allow other reference release in its default time diff --git a/parity/signer.rs b/parity/signer.rs index 7f800f0e0..7a5e90341 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -15,51 +15,16 @@ // along with Parity. If not, see . use std::io; -use std::path::PathBuf; -use std::sync::Arc; - -pub use ethcore_signer::Server as SignerServer; +use std::path::{Path, PathBuf}; use ansi_term::Colour; -use dir::default_data_path; -use parity_rpc::informant::RpcStats; -use parity_rpc::{self, ConfirmationsQueue}; -use ethcore_signer as signer; -use helpers::replace_home; -use parity_reactor::TokioRemote; +use rpc; use rpc_apis; +use parity_rpc; use path::restrict_permissions_owner; -use util::H256; -const CODES_FILENAME: &'static str = "authcodes"; -#[derive(Debug, PartialEq, Clone)] -pub struct Configuration { - pub enabled: bool, - pub port: u16, - pub interface: String, - pub signer_path: String, - pub skip_origin_validation: bool, -} - -impl Default for Configuration { - fn default() -> Self { - let data_dir = default_data_path(); - Configuration { - enabled: true, - port: 8180, - interface: "127.0.0.1".into(), - signer_path: replace_home(&data_dir, "$BASE/signer"), - skip_origin_validation: false, - } - } -} - -pub struct Dependencies { - pub apis: Arc, - pub remote: TokioRemote, - pub rpc_stats: Arc, -} +pub const CODES_FILENAME: &'static str = "authcodes"; pub struct NewToken { pub token: String, @@ -67,42 +32,29 @@ pub struct NewToken { pub message: String, } -#[derive(Debug, Default, Clone)] -pub struct StandardExtractor; -impl signer::MetaExtractor for StandardExtractor { - fn extract_metadata(&self, session: &H256) -> parity_rpc::Metadata { - let mut metadata = parity_rpc::Metadata::default(); - metadata.origin = parity_rpc::Origin::Signer((*session).into()); - metadata - } +pub fn new_service(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration) -> rpc_apis::SignerService { + let signer_path = ws_conf.signer_path.clone(); + let signer_enabled = ui_conf.enabled; + + rpc_apis::SignerService::new(move || { + generate_new_token(&signer_path).map_err(|e| format!("{:?}", e)) + }, signer_enabled) } -pub fn start( - conf: Configuration, - queue: Arc, - deps: Dependencies, -) -> Result, String> { - if !conf.enabled { - Ok(None) - } else { - Ok(Some(do_start(conf, queue, deps)?)) - } -} - -fn codes_path(path: String) -> PathBuf { - let mut p = PathBuf::from(path); +pub fn codes_path(path: &Path) -> PathBuf { + let mut p = path.to_owned(); p.push(CODES_FILENAME); let _ = restrict_permissions_owner(&p, true, false); p } -pub fn execute(cmd: Configuration) -> Result { - Ok(generate_token_and_url(&cmd)?.message) +pub fn execute(ws_conf: rpc::WsConfiguration, ui_conf: rpc::UiConfiguration) -> Result { + Ok(generate_token_and_url(&ws_conf, &ui_conf)?.message) } -pub fn generate_token_and_url(conf: &Configuration) -> Result { - let code = generate_new_token(conf.signer_path.clone()).map_err(|err| format!("Error generating token: {}", err))?; - let auth_url = format!("http://{}:{}/#/auth?token={}", conf.interface, conf.port, code); +pub fn 
generate_token_and_url(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration) -> Result { + let code = generate_new_token(&ws_conf.signer_path).map_err(|err| format!("Error generating token: {:?}", err))?; + let auth_url = format!("http://{}:{}/#/auth?token={}", ui_conf.interface, ui_conf.port, code); // And print in to the console Ok(NewToken { token: code.clone(), @@ -119,49 +71,12 @@ Or use the generated token: }) } -pub fn generate_new_token(path: String) -> io::Result { +fn generate_new_token(path: &Path) -> io::Result { let path = codes_path(path); - let mut codes = signer::AuthCodes::from_file(&path)?; + let mut codes = parity_rpc::AuthCodes::from_file(&path)?; codes.clear_garbage(); let code = codes.generate_new()?; codes.to_file(&path)?; trace!("New key code created: {}", Colour::White.bold().paint(&code[..])); Ok(code) } - -fn do_start( - conf: Configuration, - queue: Arc, - deps: Dependencies -) -> Result { - let addr = format!("{}:{}", conf.interface, conf.port) - .parse() - .map_err(|_| format!("Invalid port specified: {}", conf.port))?; - - let start_result = { - let server = signer::ServerBuilder::new( - queue, - codes_path(conf.signer_path), - ); - if conf.skip_origin_validation { - warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Trusted Signer with no origin validation.")); - info!("If you do not intend this, exit now."); - } - let server = server.skip_origin_validation(conf.skip_origin_validation); - let server = server.stats(deps.rpc_stats.clone()); - let handler = rpc_apis::setup_rpc(deps.rpc_stats, &*deps.apis, rpc_apis::ApiSet::SafeContext); - let remote = deps.remote.clone(); - server.start_with_extractor(addr, handler, remote, StandardExtractor) - }; - - match start_result { - Err(signer::ServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("Trusted UI address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ui-port and --ui-interface options.", addr)), - _ => Err(format!("Trusted Signer io error: {}", err)), - }, - Err(e) => Err(format!("Trusted Signer Error: {:?}", e)), - Ok(server) => Ok(server), - } -} - - diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index b1e2d64aa..f9b18ef32 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -8,9 +8,13 @@ authors = ["Parity Technologies "] [lib] [dependencies] +cid = "0.2" futures = "0.1" log = "0.3" +multihash = "0.5" order-stat = "0.1" +rand = "0.3" +rust-crypto = "0.2" rustc-serialize = "0.3" semver = "0.6" serde = "0.9" @@ -19,10 +23,6 @@ serde_json = "0.9" time = "0.1" tokio-timer = "0.1" transient-hashmap = "0.4" -cid = "0.2.1" -multihash = "0.5" -rust-crypto = "0.2.36" -rand = "0.3" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } diff --git a/rpc/rpctest/Cargo.toml b/rpc/rpctest/Cargo.toml deleted file mode 100644 index 258af2f39..000000000 --- a/rpc/rpctest/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -description = "Rpc test client." -name = "rpctest" -version = "1.7.0" -license = "GPL-3.0" -authors = ["Parity Technologies "] - -[dependencies] -ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } -docopt = "0.7" -ethcore = { path = "../../ethcore" } -ethcore-devtools = { path = "../../devtools" } -ethcore-util = { path = "../../util" } -ethjson = { path = "../../json" } -parity-rpc = { path = ".." 
} -rustc-serialize = "0.3" -serde_json = "0.8" diff --git a/rpc/rpctest/src/main.rs b/rpc/rpctest/src/main.rs deleted file mode 100644 index d65d2462a..000000000 --- a/rpc/rpctest/src/main.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -extern crate ctrlc; -extern crate docopt; -extern crate ethcore; -extern crate ethcore_devtools as devtools; -extern crate ethcore_util as util; -extern crate ethjson; -extern crate parity_rpc as rpc; -extern crate rustc_serialize; -extern crate serde_json; - -use std::collections::HashMap; -use std::sync::{Arc, Mutex, Condvar}; -use std::process; -use std::fs::File; -use std::path::Path; -use docopt::Docopt; -use ctrlc::CtrlC; -use ethcore::spec::Genesis; -use ethcore::pod_state::PodState; -use ethcore::ethereum; -use ethcore::client::{BlockChainClient, Client, ClientConfig}; -use devtools::RandomTempPath; -use util::IoChannel; -use rpc::v1::tests::helpers::{TestSyncProvider, Config as SyncConfig, TestMinerService, TestAccountProvider, TestAccount}; -use rpc::v1::{Eth, EthClient, EthFilter, EthFilterClient}; -use util::panics::MayPanic; -use util::hash::Address; - -const USAGE: &'static str = r#" -Parity rpctest client. - By Wood/Paronyan/Kotewicz/Drwięga/Volf. - Copyright 2015, 2016, 2017 Parity Technologies (UK) Ltd - -Usage: - rpctest --json --name [options] - rpctest --help - -Options: - --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API - server [default: 127.0.0.1]. - --jsonrpc-port PORT Specify the port portion of the JSONRPC API server - [default: 8545]. 
-"#; - -#[derive(Debug, RustcDecodable)] -struct Args { - arg_test_file: String, - arg_test_name: String, - flag_jsonrpc_addr: String, - flag_jsonrpc_port: u16, -} - -struct Configuration { - args: Args, -} - -impl Configuration { - fn parse() -> Self { - Configuration { - args: Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()) - } - } - - fn execute(&self) { - println!("file path: {:?}", self.args.arg_test_file); - println!("test name: {:?}", self.args.arg_test_name); - - let path = Path::new(&self.args.arg_test_file); - let file = File::open(path).unwrap_or_else(|_| { - println!("Cannot open file."); - process::exit(1); - }); - - let tests: ethjson::blockchain::Test = serde_json::from_reader(file).unwrap_or_else(|err| { - println!("Invalid json file."); - println!("{:?}", err); - process::exit(2); - }); - - let blockchain = tests.get(&self.args.arg_test_name).unwrap_or_else(|| { - println!("Invalid test name."); - process::exit(3); - }); - - let genesis = Genesis::from(blockchain.genesis()); - let state = PodState::from(blockchain.pre_state.clone()); - let mut spec = ethereum::new_frontier_test(); - spec.set_genesis_state(state); - spec.overwrite_genesis_params(genesis); - assert!(spec.is_state_root_valid()); - - let temp = RandomTempPath::new(); - { - let client: Arc = Client::new(ClientConfig::default(), spec, temp.as_path(), IoChannel::disconnected()).unwrap(); - for b in &blockchain.blocks_rlp() { - let _ = client.import_block(b.clone()); - client.flush_queue(); - client.import_verified_blocks(); - } - let sync = Arc::new(TestSyncProvider::new(SyncConfig { - protocol_version: 65, - num_peers: 120 - })); - - let miner = Arc::new(TestMinerService::default()); - let mut accs = HashMap::new(); - accs.insert(Address::from(1), TestAccount::new("test")); - let accounts = Arc::new(TestAccountProvider::new(accs)); - let server = rpc::RpcServer::new(); - server.add_delegate(EthClient::new(&client, &sync, &accounts, &miner, true).to_delegate()); - server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate()); - - let url = format!("{}:{}", self.args.flag_jsonrpc_addr, self.args.flag_jsonrpc_port); - let panic_handler = server.start_http(url.as_ref(), "*", 1); - let exit = Arc::new(Condvar::new()); - - let e = exit.clone(); - CtrlC::set_handler(move || { e.notify_all(); }); - - let e = exit.clone(); - panic_handler.on_panic(move |_reason| { e.notify_all(); }); - - let mutex = Mutex::new(()); - let _ = exit.wait(mutex.lock()).unwrap(); - } - - } -} - -fn main() { - Configuration::parse().execute(); -} diff --git a/signer/src/authcode_store.rs b/rpc/src/authcodes.rs similarity index 99% rename from signer/src/authcode_store.rs rename to rpc/src/authcodes.rs index 3ca640aa7..57de437ab 100644 --- a/signer/src/authcode_store.rs +++ b/rpc/src/authcodes.rs @@ -14,11 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use rand::Rng; -use rand::os::OsRng; use std::io::{self, Read, Write}; use std::path::Path; use std::{fs, time, mem}; + +use rand::Rng; +use rand::os::OsRng; use util::{H256, Hashable, Itertools}; /// Providing current time in seconds @@ -347,5 +348,3 @@ mod tests { } } - - diff --git a/rpc/src/metadata.rs b/rpc/src/http_common.rs similarity index 64% rename from rpc/src/metadata.rs rename to rpc/src/http_common.rs index af3a5d183..edbb16140 100644 --- a/rpc/src/metadata.rs +++ b/rpc/src/http_common.rs @@ -14,11 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Transport-specific metadata extractors. + use jsonrpc_core; use http; use hyper; use minihttp; -use HttpMetaExtractor; + +/// HTTP RPC server impl-independent metadata extractor +pub trait HttpMetaExtractor: Send + Sync + 'static { + /// Type of Metadata + type Metadata: jsonrpc_core::Metadata; + /// Extracts metadata from given params. + fn read_metadata(&self, origin: Option, user_agent: Option, dapps_origin: Option) -> Self::Metadata; +} pub struct HyperMetaExtractor { extractor: T, @@ -37,13 +46,14 @@ impl http::MetaExtractor for HyperMetaExtractor where M: jsonrpc_core::Metadata, { fn read_metadata(&self, req: &hyper::server::Request) -> M { - let origin = req.headers().get::() - .map(|origin| format!("{}://{}", origin.scheme, origin.host)) - .unwrap_or_else(|| "unknown".into()); - let dapps_origin = req.headers().get_raw("x-parity-origin") + let as_string = |header: Option<&http::request_response::header::Raw>| header .and_then(|raw| raw.one()) .map(|raw| String::from_utf8_lossy(raw).into_owned()); - self.extractor.read_metadata(origin, dapps_origin) + + let origin = as_string(req.headers().get_raw("origin")); + let user_agent = as_string(req.headers().get_raw("user-agent")); + let dapps_origin = as_string(req.headers().get_raw("x-parity-origin")); + self.extractor.read_metadata(origin, user_agent, dapps_origin) } } @@ -64,11 +74,10 @@ impl minihttp::MetaExtractor for MiniMetaExtractor where M: jsonrpc_core::Metadata, { fn read_metadata(&self, req: &minihttp::Req) -> M { - let origin = req.header("origin") - .unwrap_or_else(|| "unknown") - .to_owned(); + let origin = req.header("origin").map(|h| h.to_owned()); + let user_agent = req.header("user-agent").map(|h| h.to_owned()); let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned()); - self.extractor.read_metadata(origin, dapps_origin) + self.extractor.read_metadata(origin, user_agent, dapps_origin) } } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 4540d6b33..22c9b98cd 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -14,13 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Ethcore rpc. -#![warn(missing_docs)] -#![cfg_attr(feature="nightly", feature(plugin))] -#![cfg_attr(feature="nightly", plugin(clippy))] +//! Parity RPC. 
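// Editor's note: std-only illustration of the `as_string` conversion used by
// `HyperMetaExtractor::read_metadata` in rpc/src/http_common.rs above. hyper's raw
// header type is replaced by a plain byte slice; the point is the lossy UTF-8
// handling, which keeps malformed Origin / User-Agent headers usable.
fn header_to_string(raw: Option<&[u8]>) -> Option<String> {
    raw.map(|bytes| String::from_utf8_lossy(bytes).into_owned())
}

fn main() {
    assert_eq!(
        header_to_string(Some(&b"http://parity.io"[..])),
        Some("http://parity.io".to_owned())
    );
    // Invalid UTF-8 is replaced rather than rejected.
    assert_eq!(header_to_string(Some(&[0x66u8, 0xff][..])), Some("f\u{fffd}".to_owned()));
    assert_eq!(header_to_string(None), None);
}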
+#![warn(missing_docs)] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] + +extern crate cid; +extern crate crypto as rust_crypto; extern crate futures; +extern crate multihash; extern crate order_stat; +extern crate rand; extern crate rustc_serialize; extern crate semver; extern crate serde; @@ -28,10 +33,6 @@ extern crate serde_json; extern crate time; extern crate tokio_timer; extern crate transient_hashmap; -extern crate cid; -extern crate multihash; -extern crate crypto as rust_crypto; -extern crate rand; extern crate jsonrpc_core; extern crate jsonrpc_http_server as http; @@ -41,6 +42,7 @@ extern crate jsonrpc_pubsub; extern crate ethash; extern crate ethcore; +extern crate ethcore_devtools as devtools; extern crate ethcore_io as io; extern crate ethcore_ipc; extern crate ethcore_light as light; @@ -66,8 +68,6 @@ extern crate serde_derive; #[cfg(test)] extern crate ethjson; -#[cfg(test)] -extern crate ethcore_devtools as devtools; #[cfg(test)] #[macro_use] @@ -75,9 +75,12 @@ extern crate pretty_assertions; pub extern crate jsonrpc_ws_server as ws; -mod metadata; +mod authcodes; +mod http_common; pub mod v1; +pub mod tests; + pub use jsonrpc_pubsub::Session as PubSubSession; pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; pub use http::{ @@ -86,8 +89,11 @@ pub use http::{ AccessControlAllowOrigin, Host, DomainsValidation }; -pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; +pub use v1::{NetworkSettings, Metadata, Origin, informant, dispatch, signer, dapps}; pub use v1::block_import::is_major_importing; +pub use v1::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher}; +pub use authcodes::{AuthCodes, TimeProvider}; +pub use http_common::HttpMetaExtractor; use std::net::SocketAddr; use http::tokio_core; @@ -100,6 +106,16 @@ pub enum HttpServer { Hyper(http::Server), } +impl HttpServer { + /// Returns current listening address. + pub fn address(&self) -> &SocketAddr { + match *self { + HttpServer::Mini(ref s) => s.address(), + HttpServer::Hyper(ref s) => &s.addrs()[0], + } + } +} + /// RPC HTTP Server error #[derive(Debug)] pub enum HttpServerError { @@ -128,14 +144,6 @@ impl From for HttpServerError { } } -/// HTTP RPC server impl-independent metadata extractor -pub trait HttpMetaExtractor: Send + Sync + 'static { - /// Type of Metadata - type Metadata: jsonrpc_core::Metadata; - /// Extracts metadata from given params. - fn read_metadata(&self, origin: String, dapps_origin: Option) -> Self::Metadata; -} - /// HTTP server implementation-specific settings. pub enum HttpSettings { /// Enable fast minihttp server with given number of threads. @@ -164,7 +172,7 @@ pub fn start_http( HttpSettings::Dapps(middleware) => { let mut builder = http::ServerBuilder::new(handler) .event_loop_remote(remote) - .meta_extractor(metadata::HyperMetaExtractor::new(extractor)) + .meta_extractor(http_common::HyperMetaExtractor::new(extractor)) .cors(cors_domains.into()) .allowed_hosts(allowed_hosts.into()); @@ -177,7 +185,7 @@ pub fn start_http( HttpSettings::Threads(threads) => { minihttp::ServerBuilder::new(handler) .threads(threads) - .meta_extractor(metadata::MiniMetaExtractor::new(extractor)) + .meta_extractor(http_common::MiniMetaExtractor::new(extractor)) .cors(cors_domains.into()) .allowed_hosts(allowed_hosts.into()) .start_http(addr) @@ -205,13 +213,14 @@ pub fn start_ipc( } /// Start WS server and return `Server` handle. 
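// Editor's note: stand-in sketch of the dispatch inside the new `HttpServer::address()`
// above. `MiniServer` / `HyperServer` are placeholders for the two real server types;
// the hyper-backed variant can expose several bound addresses and the first one is
// the one reported.
use std::net::SocketAddr;

struct MiniServer { addr: SocketAddr }
struct HyperServer { addrs: Vec<SocketAddr> }

enum HttpServer {
    Mini(MiniServer),
    Hyper(HyperServer),
}

impl HttpServer {
    fn address(&self) -> &SocketAddr {
        match *self {
            HttpServer::Mini(ref s) => &s.addr,
            HttpServer::Hyper(ref s) => &s.addrs[0],
        }
    }
}

fn main() {
    let mini = HttpServer::Mini(MiniServer { addr: "127.0.0.1:8545".parse().unwrap() });
    let hyper = HttpServer::Hyper(HyperServer { addrs: vec!["127.0.0.1:8546".parse().unwrap()] });
    println!("{} {}", mini.address(), hyper.address());
}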
-pub fn start_ws( +pub fn start_ws( addr: &SocketAddr, handler: H, remote: tokio_core::reactor::Remote, allowed_origins: ws::DomainsValidation, allowed_hosts: ws::DomainsValidation, extractor: T, + middleware: V, stats: U, ) -> Result where M: jsonrpc_core::Metadata, @@ -219,9 +228,11 @@ pub fn start_ws( H: Into>, T: ws::MetaExtractor, U: ws::SessionStats, + V: ws::RequestMiddleware, { ws::ServerBuilder::new(handler) .event_loop_remote(remote) + .request_middleware(middleware) .allowed_origins(allowed_origins) .allowed_hosts(allowed_hosts) .session_meta_extractor(extractor) diff --git a/rpc/src/tests/helpers.rs b/rpc/src/tests/helpers.rs new file mode 100644 index 000000000..e7a1684c5 --- /dev/null +++ b/rpc/src/tests/helpers.rs @@ -0,0 +1,84 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::ops::{Deref, DerefMut}; + +use devtools::RandomTempPath; +use parity_reactor::{EventLoop, TokioRemote}; + +use authcodes::AuthCodes; + +/// Server with event loop +pub struct Server { + /// Server + pub server: T, + /// RPC Event Loop + pub event_loop: EventLoop, +} + +impl Server { + pub fn new(f: F) -> Server where + F: FnOnce(TokioRemote) -> T, + { + let event_loop = EventLoop::spawn(); + let remote = event_loop.raw_remote(); + + Server { + server: f(remote), + event_loop: event_loop, + } + } +} + +impl Deref for Server { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.server + } +} + +/// Struct representing authcodes +pub struct GuardedAuthCodes { + authcodes: AuthCodes, + /// The path to the mock authcodes + pub path: RandomTempPath, +} + +impl GuardedAuthCodes { + pub fn new() -> Self { + let mut path = RandomTempPath::new(); + path.panic_on_drop_failure = false; + + GuardedAuthCodes { + authcodes: AuthCodes::from_file(&path).unwrap(), + path: path, + } + } +} + +impl Deref for GuardedAuthCodes { + type Target = AuthCodes; + fn deref(&self) -> &Self::Target { + &self.authcodes + } +} + +impl DerefMut for GuardedAuthCodes { + fn deref_mut(&mut self) -> &mut AuthCodes { + &mut self.authcodes + } +} diff --git a/signer/build.rs b/rpc/src/tests/mod.rs similarity index 79% rename from signer/build.rs rename to rpc/src/tests/mod.rs index 6a89bd269..d4d9538dc 100644 --- a/signer/build.rs +++ b/rpc/src/tests/mod.rs @@ -14,12 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate rustc_version; +//! RPC integration tests. -use rustc_version::{version_meta, Channel}; - -fn main() { - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } -} +mod helpers; +#[cfg(test)] mod rpc; +pub mod ws; diff --git a/rpc/src/tests/rpc.rs b/rpc/src/tests/rpc.rs new file mode 100644 index 000000000..7bd156cf5 --- /dev/null +++ b/rpc/src/tests/rpc.rs @@ -0,0 +1,172 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. 
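// Editor's note: std-only sketch of the `Server<T>` test helper added in
// rpc/src/tests/helpers.rs above: the wrapper keeps the RPC event loop alive for as
// long as the server handle and Derefs to the inner server so tests can call it
// transparently. `EventLoop` here is only a placeholder for parity_reactor::EventLoop.
use std::ops::Deref;

struct EventLoop; // placeholder: keeps background threads alive in the real helper

struct Server<T> {
    server: T,
    _event_loop: EventLoop,
}

impl<T> Server<T> {
    fn new<F: FnOnce() -> T>(f: F) -> Server<T> {
        Server { server: f(), _event_loop: EventLoop }
    }
}

impl<T> Deref for Server<T> {
    type Target = T;
    fn deref(&self) -> &T { &self.server }
}

fn main() {
    let srv = Server::new(|| String::from("listening on 127.0.0.1:0"));
    // Deref lets tests call inner methods directly on the wrapper.
    assert!(srv.contains("127.0.0.1"));
}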
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use devtools::http_client; +use jsonrpc_core::MetaIoHandler; +use http::{self, hyper}; + +use {HttpSettings, HttpServer}; +use tests::helpers::Server; +use v1::{extractors, Metadata}; + +fn serve(handler: Option>) -> Server { + let address = "127.0.0.1:0".parse().unwrap(); + let handler = handler.unwrap_or_default(); + + Server::new(|remote| ::start_http( + &address, + http::DomainsValidation::Disabled, + http::DomainsValidation::Disabled, + handler, + remote, + extractors::RpcExtractor, + HttpSettings::Dapps(Some(|_req: &hyper::server::Request, _control: &hyper::Control| { + http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: false + } + })), + ).unwrap()) +} + +/// Test a single request to running server +fn request(server: Server, request: &str) -> http_client::Response { + http_client::request(server.server.address(), request) +} + +#[cfg(test)] +mod testsing { + use jsonrpc_core::{MetaIoHandler, Value}; + use jsonrpc_core::futures::{Future, future}; + use v1::Metadata; + use super::{request, Server}; + + fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) { + let mut io = MetaIoHandler::default(); + io.add_method_with_meta("hello", |_, meta: Metadata| { + future::ok(Value::String(format!("{}", meta.origin))).boxed() + }); + let server = super::serve(Some(io)); + let address = server.server.address().to_owned(); + + (server, address) + } + + #[test] + fn should_extract_rpc_origin() { + // given + let (server, address) = serve(); + + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = "34\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown via RPC\",\"id\":1}\n\n0\n\n"; + let res = request(server, + &format!("\ + POST / HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Connection: close\r\n\ + \r\n\ + {} + ", address, req.len(), req) + ); + + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } + + #[test] + fn should_extract_rpc_origin_with_service() { + // given + let (server, address) = serve(); + + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = "38\n{\"jsonrpc\":\"2.0\",\"result\":\"curl/7.16.3 via RPC\",\"id\":1}\n\n0\n\n"; + let res = request(server, + &format!("\ + POST / HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Connection: close\r\n\ + User-Agent: curl/7.16.3\r\n\ + \r\n\ + {} + ", address, req.len(), req) + ); + + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } + + #[test] + fn should_extract_dapp_origin() { + // given + let (server, address) = serve(); + + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = "3A\n{\"jsonrpc\":\"2.0\",\"result\":\"Dapp http://parity.io\",\"id\":1}\n\n0\n\n"; + let res = request(server, 
+ &format!("\ + POST / HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Origin: http://parity.io\r\n\ + Connection: close\r\n\ + User-Agent: curl/7.16.3\r\n\ + \r\n\ + {} + ", address, req.len(), req) + ); + + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } + + #[test] + fn should_extract_dapp_origin_from_extension() { + // given + let (server, address) = serve(); + + // when + let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; + let expected = "44\n{\"jsonrpc\":\"2.0\",\"result\":\"Dapp http://wallet.ethereum.org\",\"id\":1}\n\n0\n\n"; + let res = request(server, + &format!("\ + POST / HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Origin: null\r\n\ + X-Parity-Origin: http://wallet.ethereum.org\r\n\ + Connection: close\r\n\ + User-Agent: curl/7.16.3\r\n\ + \r\n\ + {} + ", address, req.len(), req) + ); + + // then + res.assert_status("HTTP/1.1 200 OK"); + assert_eq!(res.body, expected); + } +} diff --git a/signer/src/tests/mod.rs b/rpc/src/tests/ws.rs similarity index 53% rename from signer/src/tests/mod.rs rename to rpc/src/tests/ws.rs index bc90a6cd3..77fb1ea2c 100644 --- a/signer/src/tests/mod.rs +++ b/rpc/src/tests/ws.rs @@ -14,79 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::ops::{Deref, DerefMut}; +//! WebSockets server tests. + use std::sync::Arc; use devtools::http_client; -use devtools::RandomTempPath; - -use rpc::ConfirmationsQueue; -use jsonrpc_core::IoHandler; -use jsonrpc_server_utils::reactor::RpcEventLoop; +use jsonrpc_core::MetaIoHandler; use rand; +use ws; -use ServerBuilder; -use Server; -use AuthCodes; - -/// Struct representing authcodes -pub struct GuardedAuthCodes { - authcodes: AuthCodes, - /// The path to the mock authcodes - pub path: RandomTempPath, -} -impl Deref for GuardedAuthCodes { - type Target = AuthCodes; - fn deref(&self) -> &Self::Target { - &self.authcodes - } -} -impl DerefMut for GuardedAuthCodes { - fn deref_mut(&mut self) -> &mut AuthCodes { - &mut self.authcodes - } -} - -/// Server with event loop -pub struct ServerLoop { - /// Signer Server - pub server: Server, - /// RPC Event Loop - pub event_loop: RpcEventLoop, -} - -impl Deref for ServerLoop { - type Target = Server; - - fn deref(&self) -> &Self::Target { - &self.server - } -} +use v1::{extractors, informant}; +use tests::helpers::{GuardedAuthCodes, Server}; /// Setup a mock signer for tests -pub fn serve() -> (ServerLoop, usize, GuardedAuthCodes) { - let mut path = RandomTempPath::new(); - path.panic_on_drop_failure = false; - let queue = Arc::new(ConfirmationsQueue::default()); - let builder = ServerBuilder::new(queue, path.to_path_buf()); +pub fn serve() -> (Server, usize, GuardedAuthCodes) { let port = 35000 + rand::random::() % 10000; - let event_loop = RpcEventLoop::spawn().unwrap(); - let io = IoHandler::default(); - let remote = event_loop.remote(); - let server = builder.start(format!("127.0.0.1:{}", port).parse().unwrap(), io, remote).unwrap(); - let res = ServerLoop { - server: server, - event_loop: event_loop, - }; + let address = format!("127.0.0.1:{}", port).parse().unwrap(); + let io = MetaIoHandler::default(); + let authcodes = GuardedAuthCodes::new(); + let stats = Arc::new(informant::RpcStats::default()); - (res, port, GuardedAuthCodes { - authcodes: AuthCodes::from_file(&path).unwrap(), - path: path, - }) + let res = Server::new(|remote| 
::start_ws( + &address, + io, + remote, + ws::DomainsValidation::Disabled, + ws::DomainsValidation::Disabled, + extractors::WsExtractor::new(Some(&authcodes.path)), + extractors::WsExtractor::new(Some(&authcodes.path)), + extractors::WsStats::new(stats), + ).unwrap()); + + (res, port, authcodes) } /// Test a single request to running server -pub fn request(server: ServerLoop, request: &str) -> http_client::Response { +pub fn request(server: Server, request: &str) -> http_client::Response { http_client::request(server.server.addr(), request) } @@ -97,49 +60,6 @@ mod testing { use devtools::http_client; use super::{serve, request}; - #[test] - fn should_reject_invalid_host() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: test:8180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); - assert!(response.body.contains("URL Blocked")); - http_client::assert_security_headers_present(&response.headers, None); - } - - #[test] - fn should_allow_home_parity_host() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - GET http://parity.web3.site/ HTTP/1.1\r\n\ - Host: parity.web3.site\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); - } - #[test] fn should_not_redirect_to_parity_host() { // given @@ -157,48 +77,7 @@ mod testing { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - } - - #[test] - fn should_serve_styles_even_on_disallowed_domain() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - GET /styles.css HTTP/1.1\r\n\ - Host: test:8180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); - } - - #[test] - fn should_return_200_ok_for_connect_requests() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - CONNECT parity.web3.site:8080 HTTP/1.1\r\n\ - Host: parity.web3.site\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.status, "HTTP/1.1 200 Ok".to_owned()); } #[test] @@ -221,7 +100,7 @@ mod testing { ); // then - assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); + assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); http_client::assert_security_headers_present(&response.headers, None); } @@ -300,7 +179,7 @@ mod testing { // then assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned()); - assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); + assert_eq!(response2.status, "HTTP/1.1 403 Forbidden".to_owned()); http_client::assert_security_headers_present(&response2.headers, None); } } diff --git a/rpc/src/v1/extractors.rs b/rpc/src/v1/extractors.rs new file mode 100644 index 000000000..1feaf4d9b --- /dev/null +++ b/rpc/src/v1/extractors.rs @@ -0,0 +1,263 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
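// Editor's note: the expected bodies in the tests/rpc.rs tests above
// ("34\n{...}\n\n0\n\n" and friends) are raw chunked transfer-encoding as the
// devtools test client records it: an uppercase hex chunk length, the JSON
// response plus its trailing newline, then a terminating zero-length chunk.
// This sketch reproduces that framing under the assumption, taken from the
// literals themselves, that framing newlines appear as bare '\n'.
fn chunked(json: &str) -> String {
    let data = format!("{}\n", json);
    format!("{:X}\n{}\n0\n\n", data.len(), data)
}

fn main() {
    let body = r#"{"jsonrpc":"2.0","result":"unknown via RPC","id":1}"#;
    assert_eq!(
        chunked(body),
        "34\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown via RPC\",\"id\":1}\n\n0\n\n"
    );
    // 0x34 == 52 bytes: the 51-byte JSON payload plus its trailing newline.
    println!("{}", chunked(body));
}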
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Parity-specific metadata extractors. + +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use authcodes; +use http_common::HttpMetaExtractor; +use ipc; +use jsonrpc_core as core; +use jsonrpc_pubsub::Session; +use ws; +use util::H256; + +use v1::{Metadata, Origin}; +use v1::informant::RpcStats; + +/// Common HTTP & IPC metadata extractor. +pub struct RpcExtractor; + +impl HttpMetaExtractor for RpcExtractor { + type Metadata = Metadata; + + fn read_metadata(&self, origin: Option, user_agent: Option, dapps_origin: Option) -> Metadata { + let mut metadata = Metadata::default(); + + metadata.origin = match (origin.as_ref().map(|s| s.as_str()), user_agent, dapps_origin) { + (Some("null"), _, Some(dapp)) => Origin::Dapps(dapp.into()), + (Some(dapp), _, _) => Origin::Dapps(dapp.to_owned().into()), + (None, Some(service), _) => Origin::Rpc(service.into()), + (None, _, _) => Origin::Rpc("unknown".into()), + }; + + metadata + } +} + +impl ipc::MetaExtractor for RpcExtractor { + fn extract(&self, _req: &ipc::RequestContext) -> Metadata { + let mut metadata = Metadata::default(); + // TODO [ToDr] Extract proper session id when it's available in context. + metadata.origin = Origin::Ipc(1.into()); + metadata + } +} + +/// WebSockets server metadata extractor and request middleware. +pub struct WsExtractor { + authcodes_path: Option, +} + +impl WsExtractor { + /// Creates new `WsExtractor` with given authcodes path. + pub fn new(path: Option<&Path>) -> Self { + WsExtractor { + authcodes_path: path.map(|p| p.to_owned()), + } + } +} + +impl ws::MetaExtractor for WsExtractor { + fn extract(&self, req: &ws::RequestContext) -> Metadata { + let mut metadata = Metadata::default(); + let id = req.session_id as u64; + // TODO [ToDr] Extract dapp from Origin + let dapp = "".into(); + metadata.origin = match self.authcodes_path { + Some(ref path) => { + let authorization = req.protocols.get(0).and_then(|p| auth_token_hash(&path, p)); + match authorization { + Some(id) => Origin::Signer { session: id.into(), dapp: dapp }, + None => Origin::Ws { session: id.into(), dapp: dapp }, + } + }, + None => Origin::Ws { session: id.into(), dapp: dapp }, + }; + metadata.session = Some(Arc::new(Session::new(req.sender()))); + metadata + } +} + +impl ws::RequestMiddleware for WsExtractor { + fn process(&self, req: &ws::ws::Request) -> ws::MiddlewareAction { + use self::ws::ws::Response; + + // Reply with 200 Ok to HEAD requests. + if req.method() == "HEAD" { + let mut response = Response::new(200, "Ok"); + add_security_headers(&mut response); + return Some(response).into(); + } + + // Display WS info. + if req.header("sec-websocket-key").is_none() { + let mut response = Response::new(200, "Ok"); + response.set_body("WebSocket interface is active. Open WS connection to access RPC."); + add_security_headers(&mut response); + return Some(response).into(); + } + + // If protocol is provided it needs to be valid. 
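// Editor's note: pure-function sketch of the header-to-Origin decision made by
// `RpcExtractor::read_metadata` above, using a simplified local `Origin` enum
// (the real one lives in v1::types). The asserts mirror the unit tests at the
// bottom of this extractors.rs file.
#[derive(Debug, PartialEq)]
enum Origin {
    Dapps(String),
    Rpc(String),
}

fn classify(origin: Option<&str>, user_agent: Option<&str>, dapps_origin: Option<&str>) -> Origin {
    match (origin, user_agent, dapps_origin) {
        // A "null" Origin header plus an X-Parity-Origin header means a dapp
        // proxied through the dapps server.
        (Some("null"), _, Some(dapp)) => Origin::Dapps(dapp.to_owned()),
        // Any other Origin header wins, even if X-Parity-Origin is present.
        (Some(dapp), _, _) => Origin::Dapps(dapp.to_owned()),
        // No Origin: fall back to the User-Agent, then to "unknown".
        (None, Some(service), _) => Origin::Rpc(service.to_owned()),
        (None, None, _) => Origin::Rpc("unknown".to_owned()),
    }
}

fn main() {
    assert_eq!(classify(None, None, None), Origin::Rpc("unknown".into()));
    assert_eq!(classify(None, Some("http://parity.io"), None), Origin::Rpc("http://parity.io".into()));
    assert_eq!(classify(None, Some("http://parity.io"), Some("ignored")), Origin::Rpc("http://parity.io".into()));
    assert_eq!(
        classify(Some("null"), None, Some("https://wallet.ethereum.org")),
        Origin::Dapps("https://wallet.ethereum.org".into())
    );
}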
+ let protocols = req.protocols().ok().unwrap_or_else(Vec::new); + if let Some(ref path) = self.authcodes_path { + if protocols.len() == 1 { + let authorization = auth_token_hash(&path, protocols[0]); + if authorization.is_none() { + warn!( + "Blocked connection from {} using invalid token.", + req.header("origin").and_then(|e| ::std::str::from_utf8(e).ok()).unwrap_or("Unknown Origin") + ); + let mut response = Response::new(403, "Forbidden"); + add_security_headers(&mut response); + return Some(response).into(); + } + } + } + + // Otherwise just proceed. + ws::MiddlewareAction::Proceed + } +} + +fn add_security_headers(res: &mut ws::ws::Response) { + let mut headers = res.headers_mut(); + headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec())); + headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec())); + headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec())); +} + +fn auth_token_hash(codes_path: &Path, protocol: &str) -> Option { + let mut split = protocol.split('_'); + let auth = split.next().and_then(|v| v.parse().ok()); + let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok()); + + if let (Some(auth), Some(time)) = (auth, time) { + // Check if the code is valid + return authcodes::AuthCodes::from_file(codes_path) + .ok() + .and_then(|mut codes| { + // remove old tokens + codes.clear_garbage(); + + let res = codes.is_valid(&auth, time); + // make sure to save back authcodes - it might have been modified + if codes.to_file(codes_path).is_err() { + warn!(target: "signer", "Couldn't save authorization codes to file."); + } + + if res { + Some(auth) + } else { + None + } + }) + } + + None +} + +/// WebSockets RPC usage statistics. +pub struct WsStats { + stats: Arc, +} + +impl WsStats { + /// Creates new WS usage tracker. + pub fn new(stats: Arc) -> Self { + WsStats { + stats: stats, + } + } +} + +impl ws::SessionStats for WsStats { + fn open_session(&self, _id: ws::SessionId) { + self.stats.open_session() + } + + fn close_session(&self, _id: ws::SessionId) { + self.stats.close_session() + } +} + +/// WebSockets middleware dispatching requests to different handles dependning on metadata. +pub struct WsDispatcher> { + full_handler: core::MetaIoHandler, +} + +impl> WsDispatcher { + /// Create new `WsDispatcher` with given full handler. + pub fn new(full_handler: core::MetaIoHandler) -> Self { + WsDispatcher { + full_handler: full_handler, + } + } +} + +impl> core::Middleware for WsDispatcher { + fn on_request(&self, request: core::Request, meta: Metadata, process: F) -> core::FutureResponse where + F: FnOnce(core::Request, Metadata) -> core::FutureResponse, + { + let use_full = match &meta.origin { + &Origin::Signer { .. 
} => true, + _ => false, + }; + + if use_full { + self.full_handler.handle_rpc_request(request, meta) + } else { + process(request, meta) + } + } +} + +#[cfg(test)] +mod tests { + use super::RpcExtractor; + use {HttpMetaExtractor, Origin}; + + #[test] + fn should_extract_rpc_origin() { + // given + let extractor = RpcExtractor; + + // when + let meta1 = extractor.read_metadata(None, None, None); + let meta2 = extractor.read_metadata(None, Some("http://parity.io".to_owned()), None); + let meta3 = extractor.read_metadata(None, Some("http://parity.io".to_owned()), Some("ignored".into())); + + // then + assert_eq!(meta1.origin, Origin::Rpc("unknown".into())); + assert_eq!(meta2.origin, Origin::Rpc("http://parity.io".into())); + assert_eq!(meta3.origin, Origin::Rpc("http://parity.io".into())); + } + + #[test] + fn should_dapps_origin() { + // given + let extractor = RpcExtractor; + let dapp = "https://wallet.ethereum.org".to_owned(); + + // when + let meta = extractor.read_metadata(Some("null".into()), None, Some(dapp.clone())); + + // then + assert_eq!(meta.origin, Origin::Dapps(dapp.into())); + } +} diff --git a/rpc/src/v1/helpers/dapps.rs b/rpc/src/v1/helpers/dapps.rs new file mode 100644 index 000000000..34f4fe1b5 --- /dev/null +++ b/rpc/src/v1/helpers/dapps.rs @@ -0,0 +1,33 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Dapps Service + +use v1::types::LocalDapp; + +/// Dapps Server service. +pub trait DappsService: Send + Sync + 'static { + /// List available local dapps. + fn list_dapps(&self) -> Vec; +} + +impl DappsService for F where + F: Fn() -> Vec + Send + Sync + 'static +{ + fn list_dapps(&self) -> Vec { + (*self)() + } +} diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 02b5848df..8ec266c76 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -209,6 +209,14 @@ pub fn dapps_disabled() -> Error { } } +pub fn ws_disabled() -> Error { + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), + message: "WebSockets Server is disabled. 
This API is not available.".into(), + data: None, + } +} + pub fn network_disabled() -> Error { Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index 1f950f113..fcb452039 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -19,10 +19,10 @@ pub mod errors; pub mod accounts; pub mod block_import; +pub mod dapps; pub mod dispatch; pub mod fake_sign; pub mod light_fetch; -pub mod informant; pub mod oneshot; pub mod ipfs; pub mod secretstore; @@ -50,3 +50,7 @@ pub use self::signing_queue::{ pub use self::signer::SignerService; pub use self::subscribers::Subscribers; pub use self::subscription_manager::GenericPollManager; + +pub fn to_url(address: &Option<(String, u16)>) -> Option { + address.as_ref().map(|&(ref iface, ref port)| format!("{}:{}", iface, port)) +} diff --git a/rpc/src/v1/helpers/signer.rs b/rpc/src/v1/helpers/signer.rs index 52c3e731d..f35832a40 100644 --- a/rpc/src/v1/helpers/signer.rs +++ b/rpc/src/v1/helpers/signer.rs @@ -27,21 +27,21 @@ const TOKEN_LIFETIME_SECS: u32 = 3600; /// Manages communication with Signer crate pub struct SignerService { + is_enabled: bool, queue: Arc, web_proxy_tokens: Mutex>, generate_new_token: Box Result + Send + Sync + 'static>, - address: Option<(String, u16)>, } impl SignerService { /// Creates new Signer Service given function to generate new tokens. - pub fn new(new_token: F, address: Option<(String, u16)>) -> Self + pub fn new(new_token: F, is_enabled: bool) -> Self where F: Fn() -> Result + Send + Sync + 'static { SignerService { queue: Arc::new(ConfirmationsQueue::default()), web_proxy_tokens: Mutex::new(TransientHashMap::new(TOKEN_LIFETIME_SECS)), generate_new_token: Box::new(new_token), - address: address, + is_enabled: is_enabled, } } @@ -69,20 +69,15 @@ impl SignerService { self.queue.clone() } - /// Returns signer address (if signer enabled) or `None` otherwise - pub fn address(&self) -> Option<(String, u16)> { - self.address.clone() - } - /// Returns true if Signer is enabled. pub fn is_enabled(&self) -> bool { - self.address.is_some() + self.is_enabled } #[cfg(test)] /// Creates new Signer Service for tests. 
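// Editor's note: std-only sketch of the handshake-token format consumed by
// `auth_token_hash` in v1/extractors.rs above. The WebSocket client sends a single
// protocol string of the form "<token-hash>_<unix-time>"; it is split on '_', the
// two parts are parsed, and the pair is then checked against the authcodes file
// (AuthCodes::is_valid). The hash is kept as a plain String here instead of
// util::H256, and the sample values are made up.
fn parse_ws_protocol(protocol: &str) -> Option<(String, u64)> {
    let mut split = protocol.split('_');
    let hash = split.next().map(str::to_owned);
    let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok());
    match (hash, time) {
        (Some(hash), Some(time)) => Some((hash, time)),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        parse_ws_protocol("deadbeef_1494500000"),
        Some(("deadbeef".to_owned(), 1494500000))
    );
    // Anything without a parsable timestamp is rejected, and the connection is
    // answered with 403 Forbidden by the request middleware.
    assert_eq!(parse_ws_protocol("not-a-token"), None);
}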
- pub fn new_test(address: Option<(String, u16)>) -> Self { - SignerService::new(|| Ok("new_token".into()), address) + pub fn new_test(is_enabled: bool) -> Self { + SignerService::new(|| Ok("new_token".into()), is_enabled) } } diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 015d4fef7..f0cdb5236 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -33,7 +33,7 @@ use light::client::LightChainClient; use jsonrpc_core::Error; use jsonrpc_macros::Trailing; -use v1::helpers::{errors, ipfs, SigningQueue, SignerService, NetworkSettings}; +use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::dispatch::LightDispatcher; use v1::helpers::light_fetch::LightFetch; use v1::metadata::Metadata; @@ -54,8 +54,8 @@ pub struct ParityClient { logger: Arc, settings: Arc, signer: Option>, - dapps_interface: Option, - dapps_port: Option, + dapps_address: Option<(String, u16)>, + ws_address: Option<(String, u16)>, eip86_transition: u64, } @@ -68,8 +68,8 @@ impl ParityClient { logger: Arc, settings: Arc, signer: Option>, - dapps_interface: Option, - dapps_port: Option, + dapps_address: Option<(String, u16)>, + ws_address: Option<(String, u16)>, ) -> Self { ParityClient { light_dispatch: light_dispatch, @@ -77,8 +77,8 @@ impl ParityClient { logger: logger, settings: settings, signer: signer, - dapps_interface: dapps_interface, - dapps_port: dapps_port, + dapps_address: dapps_address, + ws_address: ws_address, eip86_transition: client.eip86_transition(), } } @@ -294,22 +294,14 @@ impl Parity for ParityClient { Ok(map) } - fn signer_port(&self) -> Result { - self.signer - .clone() - .and_then(|signer| signer.address()) - .map(|address| address.1) - .ok_or_else(|| errors::signer_disabled()) - } - - fn dapps_port(&self) -> Result { - self.dapps_port + fn dapps_url(&self) -> Result { + helpers::to_url(&self.dapps_address) .ok_or_else(|| errors::dapps_disabled()) } - fn dapps_interface(&self) -> Result { - self.dapps_interface.clone() - .ok_or_else(|| errors::dapps_disabled()) + fn ws_url(&self) -> Result { + helpers::to_url(&self.ws_address) + .ok_or_else(|| errors::ws_disabled()) } fn next_nonce(&self, address: H160) -> BoxFuture { diff --git a/rpc/src/v1/impls/light/parity_set.rs b/rpc/src/v1/impls/light/parity_set.rs index 40af2f44c..cab2fa91c 100644 --- a/rpc/src/v1/impls/light/parity_set.rs +++ b/rpc/src/v1/impls/light/parity_set.rs @@ -26,21 +26,24 @@ use futures::{BoxFuture, Future}; use util::sha3; use jsonrpc_core::Error; +use v1::helpers::dapps::DappsService; use v1::helpers::errors; use v1::traits::ParitySet; -use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction}; +use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp}; /// Parity-specific rpc interface for operations altering the settings. pub struct ParitySetClient { net: Arc, + dapps: Option>, fetch: F, } impl ParitySetClient { /// Creates new `ParitySetClient` with given `Fetch`. 
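// Editor's note: std-only sketch of the `DappsService` trait added in
// v1/helpers/dapps.rs above, and of how the `dapps_list` implementations in this
// patch map a missing service to a "dapps disabled" error. `LocalDapp` is replaced
// by String and the RPC Error by a &'static str placeholder message.
trait DappsService: Send + Sync + 'static {
    fn list_dapps(&self) -> Vec<String>;
}

// Blanket impl: any listing closure can act as the service.
impl<F> DappsService for F
where
    F: Fn() -> Vec<String> + Send + Sync + 'static,
{
    fn list_dapps(&self) -> Vec<String> {
        (*self)()
    }
}

fn dapps_list(dapps: &Option<Box<dyn DappsService>>) -> Result<Vec<String>, &'static str> {
    dapps.as_ref().map(|d| d.list_dapps()).ok_or("Dapps Server is disabled.")
}

fn main() {
    let enabled: Option<Box<dyn DappsService>> =
        Some(Box::new(|| vec!["wallet".to_owned(), "status".to_owned()]));
    let disabled: Option<Box<dyn DappsService>> = None;

    assert_eq!(dapps_list(&enabled), Ok(vec!["wallet".to_owned(), "status".to_owned()]));
    assert_eq!(dapps_list(&disabled), Err("Dapps Server is disabled."));
}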
- pub fn new(net: Arc, fetch: F) -> Self { + pub fn new(net: Arc, dapps: Option>, fetch: F) -> Self { ParitySetClient { net: net, + dapps: dapps, fetch: fetch, } } @@ -132,6 +135,10 @@ impl ParitySet for ParitySetClient { })) } + fn dapps_list(&self) -> Result, Error> { + self.dapps.as_ref().map(|dapps| dapps.list_dapps()).ok_or_else(errors::dapps_disabled) + } + fn upgrade_ready(&self) -> Result, Error> { Err(errors::light_unimplemented(None)) } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index ae91af54c..377be5bb0 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -38,7 +38,7 @@ use crypto::DEFAULT_MAC; use jsonrpc_core::Error; use jsonrpc_macros::Trailing; -use v1::helpers::{errors, ipfs, SigningQueue, SignerService, NetworkSettings}; +use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::accounts::unwrap_provider; use v1::metadata::Metadata; use v1::traits::Parity; @@ -67,8 +67,8 @@ pub struct ParityClient where logger: Arc, settings: Arc, signer: Option>, - dapps_interface: Option, - dapps_port: Option, + dapps_address: Option<(String, u16)>, + ws_address: Option<(String, u16)>, eip86_transition: u64, } @@ -89,8 +89,8 @@ impl ParityClient where logger: Arc, settings: Arc, signer: Option>, - dapps_interface: Option, - dapps_port: Option, + dapps_address: Option<(String, u16)>, + ws_address: Option<(String, u16)>, ) -> Self { ParityClient { client: Arc::downgrade(client), @@ -102,8 +102,8 @@ impl ParityClient where logger: logger, settings: settings, signer: signer, - dapps_interface: dapps_interface, - dapps_port: dapps_port, + dapps_address: dapps_address, + ws_address: ws_address, eip86_transition: client.eip86_transition(), } } @@ -317,22 +317,14 @@ impl Parity for ParityClient where ) } - fn signer_port(&self) -> Result { - self.signer - .clone() - .and_then(|signer| signer.address()) - .map(|address| address.1) - .ok_or_else(|| errors::signer_disabled()) - } - - fn dapps_port(&self) -> Result { - self.dapps_port + fn dapps_url(&self) -> Result { + helpers::to_url(&self.dapps_address) .ok_or_else(|| errors::dapps_disabled()) } - fn dapps_interface(&self) -> Result { - self.dapps_interface.clone() - .ok_or_else(|| errors::dapps_disabled()) + fn ws_url(&self) -> Result { + helpers::to_url(&self.ws_address) + .ok_or_else(|| errors::ws_disabled()) } fn next_nonce(&self, address: H160) -> BoxFuture { diff --git a/rpc/src/v1/impls/parity_set.rs b/rpc/src/v1/impls/parity_set.rs index 7bd8bcb91..a6baa4a94 100644 --- a/rpc/src/v1/impls/parity_set.rs +++ b/rpc/src/v1/impls/parity_set.rs @@ -28,9 +28,10 @@ use util::sha3; use updater::{Service as UpdateService}; use jsonrpc_core::Error; +use v1::helpers::dapps::DappsService; use v1::helpers::errors; use v1::traits::ParitySet; -use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction}; +use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp}; /// Parity-specific rpc interface for operations altering the settings. pub struct ParitySetClient { @@ -38,6 +39,7 @@ pub struct ParitySetClient { miner: Weak, updater: Weak, net: Weak, + dapps: Option>, fetch: F, eip86_transition: u64, } @@ -46,12 +48,20 @@ impl ParitySetClient where C: MiningBlockChainClient + 'static, { /// Creates new `ParitySetClient` with given `Fetch`. 
- pub fn new(client: &Arc, miner: &Arc, updater: &Arc, net: &Arc, fetch: F) -> Self { + pub fn new( + client: &Arc, + miner: &Arc, + updater: &Arc, + net: &Arc, + dapps: Option>, + fetch: F, + ) -> Self { ParitySetClient { client: Arc::downgrade(client), miner: Arc::downgrade(miner), updater: Arc::downgrade(updater), net: Arc::downgrade(net), + dapps: dapps, fetch: fetch, eip86_transition: client.eip86_transition(), } @@ -166,6 +176,10 @@ impl ParitySet for ParitySetClient where })) } + fn dapps_list(&self) -> Result, Error> { + self.dapps.as_ref().map(|dapps| dapps.list_dapps()).ok_or_else(errors::dapps_disabled) + } + fn upgrade_ready(&self) -> Result, Error> { let updater = take_weak!(self.updater); Ok(updater.upgrade_ready().map(Into::into)) diff --git a/rpc/src/v1/helpers/informant.rs b/rpc/src/v1/informant.rs similarity index 100% rename from rpc/src/v1/helpers/informant.rs rename to rpc/src/v1/informant.rs diff --git a/rpc/src/v1/metadata.rs b/rpc/src/v1/metadata.rs index 74567d510..b65003ad8 100644 --- a/rpc/src/v1/metadata.rs +++ b/rpc/src/v1/metadata.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Parity RPC requests Metadata. use std::sync::Arc; use jsonrpc_core; @@ -35,7 +36,9 @@ impl Metadata { pub fn dapp_id(&self) -> DappId { // TODO [ToDr] Extract dapp info from Ws connections. match self.origin { - Origin::Dapps(ref dapp_id) => dapp_id.clone(), + Origin::Dapps(ref dapp) => dapp.clone(), + Origin::Ws { ref dapp, .. } => dapp.clone(), + Origin::Signer { ref dapp, .. } => dapp.clone(), _ => DappId::default(), } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index bd8675196..a50b6ec90 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -52,14 +52,30 @@ macro_rules! try_bf { #[macro_use] mod helpers; mod impls; -mod metadata; +mod types; +#[cfg(test)] +mod tests; +pub mod extractors; +pub mod informant; +pub mod metadata; pub mod traits; -pub mod tests; -pub mod types; pub use self::traits::{Web3, Eth, EthFilter, EthPubSub, EthSigning, Net, Parity, ParityAccounts, ParitySet, ParitySigning, PubSub, Signer, Personal, Traces, Rpc, SecretStore}; pub use self::impls::*; -pub use self::helpers::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, block_import, informant, dispatch}; +pub use self::helpers::{NetworkSettings, block_import, dispatch}; pub use self::metadata::Metadata; pub use self::types::Origin; +pub use self::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher}; + +/// Signer utilities +pub mod signer { + pub use super::helpers::{SigningQueue, SignerService, ConfirmationsQueue}; + pub use super::types::{ConfirmationRequest, TransactionModification, U256, TransactionCondition}; +} + +/// Dapps integration utilities +pub mod dapps { + pub use super::helpers::dapps::DappsService; + pub use super::types::LocalDapp; +} diff --git a/rpc/src/v1/tests/helpers/dapps.rs b/rpc/src/v1/tests/helpers/dapps.rs new file mode 100644 index 000000000..44148ab0f --- /dev/null +++ b/rpc/src/v1/tests/helpers/dapps.rs @@ -0,0 +1,37 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Test implementation of dapps service. + +use v1::types::LocalDapp; +use v1::helpers::dapps::DappsService; + +/// Test implementation of dapps service. Will always return the same list of dapps. +#[derive(Default, Clone)] +pub struct TestDappsService; + +impl DappsService for TestDappsService { + fn list_dapps(&self) -> Vec { + vec![LocalDapp { + id: "skeleton".into(), + name: "Skeleton".into(), + description: "A skeleton dapp".into(), + version: "0.1".into(), + author: "Parity Technologies Ltd".into(), + icon_url: "title.png".into(), + }] + } +} diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index 35bd60f56..aae48a2d2 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -16,14 +16,16 @@ //! Test rpc services. -mod sync_provider; -mod miner_service; +mod dapps; mod fetch; +mod miner_service; mod snapshot_service; +mod sync_provider; mod update_service; -pub use self::sync_provider::{Config, TestSyncProvider}; -pub use self::miner_service::TestMinerService; +pub use self::dapps::TestDappsService; pub use self::fetch::TestFetch; +pub use self::miner_service::TestMinerService; pub use self::snapshot_service::TestSnapshotService; -pub use self::update_service::TestUpdater; \ No newline at end of file +pub use self::sync_provider::{Config, TestSyncProvider}; +pub use self::update_service::TestUpdater; diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index 23b68e853..aeeb7902d 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -41,8 +41,8 @@ pub struct Dependencies { pub settings: Arc, pub network: Arc, pub accounts: Arc, - pub dapps_interface: Option, - pub dapps_port: Option, + pub dapps_address: Option<(String, u16)>, + pub ws_address: Option<(String, u16)>, } impl Dependencies { @@ -66,8 +66,8 @@ impl Dependencies { }), network: Arc::new(TestManageNetwork), accounts: Arc::new(AccountProvider::transient_provider()), - dapps_interface: Some("127.0.0.1".into()), - dapps_port: Some(18080), + dapps_address: Some(("127.0.0.1".into(), 18080)), + ws_address: Some(("127.0.0.1".into(), 18546)), } } @@ -84,8 +84,8 @@ impl Dependencies { self.logger.clone(), self.settings.clone(), signer, - self.dapps_interface.clone(), - self.dapps_port, + self.dapps_address.clone(), + self.ws_address.clone(), ) } @@ -345,7 +345,7 @@ fn rpc_parity_node_name() { #[test] fn rpc_parity_unsigned_transactions_count() { let deps = Dependencies::new(); - let io = deps.with_signer(SignerService::new_test(Some(("127.0.0.1".into(), 18180)))); + let io = deps.with_signer(SignerService::new_test(true)); let request = r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; @@ -386,16 +386,17 @@ fn rpc_parity_encrypt() { } #[test] -fn rpc_parity_signer_port() { +fn rpc_parity_ws_address() { // given - let deps = Dependencies::new(); - let io1 = deps.with_signer(SignerService::new_test(Some(("127.0.0.1".into(), 18180)))); + let mut deps = Dependencies::new(); + let io1 = deps.default_client(); + deps.ws_address = None; let io2 = deps.default_client(); 
// when - let request = r#"{"jsonrpc": "2.0", "method": "parity_signerPort", "params": [], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":18180,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_wsUrl", "params": [], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1:18546","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"WebSockets Server is disabled. This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); @@ -403,34 +404,16 @@ fn rpc_parity_signer_port() { } #[test] -fn rpc_parity_dapps_port() { +fn rpc_parity_dapps_address() { // given let mut deps = Dependencies::new(); let io1 = deps.default_client(); - deps.dapps_port = None; + deps.dapps_address = None; let io2 = deps.default_client(); // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsPort", "params": [], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":18080,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Dapps Server is disabled. This API is not available."},"id":1}"#; - - // then - assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); - assert_eq!(io2.handle_request_sync(request), Some(response2.to_owned())); -} - -#[test] -fn rpc_parity_dapps_interface() { - // given - let mut deps = Dependencies::new(); - let io1 = deps.default_client(); - deps.dapps_interface = None; - let io2 = deps.default_client(); - - // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsInterface", "params": [], "id": 1}"#; - let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1","id":1}"#; + let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsUrl", "params": [], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1:18080","id":1}"#; let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Dapps Server is disabled. 
This API is not available."},"id":1}"#; // then diff --git a/rpc/src/v1/tests/mocked/parity_set.rs b/rpc/src/v1/tests/mocked/parity_set.rs index 0e2d6ab27..cff9e710d 100644 --- a/rpc/src/v1/tests/mocked/parity_set.rs +++ b/rpc/src/v1/tests/mocked/parity_set.rs @@ -25,7 +25,7 @@ use ethsync::ManageNetwork; use jsonrpc_core::IoHandler; use v1::{ParitySet, ParitySetClient}; -use v1::tests::helpers::{TestMinerService, TestFetch, TestUpdater}; +use v1::tests::helpers::{TestMinerService, TestFetch, TestUpdater, TestDappsService}; use super::manage_network::TestManageNetwork; fn miner_service() -> Arc { @@ -46,8 +46,14 @@ fn updater_service() -> Arc { pub type TestParitySetClient = ParitySetClient; -fn parity_set_client(client: &Arc, miner: &Arc, updater: &Arc, net: &Arc) -> TestParitySetClient { - ParitySetClient::new(client, miner, updater, &(net.clone() as Arc), TestFetch::default()) +fn parity_set_client( + client: &Arc, + miner: &Arc, + updater: &Arc, + net: &Arc, +) -> TestParitySetClient { + let dapps_service = Arc::new(TestDappsService); + ParitySetClient::new(client, miner, updater, &(net.clone() as Arc), Some(dapps_service), TestFetch::default()) } #[test] @@ -232,3 +238,18 @@ fn rpc_parity_remove_transaction() { miner.pending_transactions.lock().insert(hash, signed); assert_eq!(io.handle_request_sync(&request), Some(response.to_owned())); } + +#[test] +fn rpc_parity_set_dapps_list() { + let miner = miner_service(); + let client = client_service(); + let network = network_service(); + let updater = updater_service(); + let mut io = IoHandler::new(); + io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate()); + + let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsList", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"author":"Parity Technologies Ltd","description":"A skeleton dapp","iconUrl":"title.png","id":"skeleton","name":"Skeleton","version":"0.1"}],"id":1}"#; + + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index afa008cf3..e2e54b2e3 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -58,7 +58,7 @@ fn miner_service() -> Arc { } fn signer_tester() -> SignerTester { - let signer = Arc::new(SignerService::new_test(None)); + let signer = Arc::new(SignerService::new_test(false)); let accounts = accounts_provider(); let opt_accounts = Some(accounts.clone()); let client = blockchain_client(); diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index 8963286ea..3e1f67154 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ -47,7 +47,7 @@ struct SigningTester { impl Default for SigningTester { fn default() -> Self { - let signer = Arc::new(SignerService::new_test(None)); + let signer = Arc::new(SignerService::new_test(false)); let client = Arc::new(TestBlockChainClient::default()); let miner = Arc::new(TestMinerService::default()); let accounts = Arc::new(AccountProvider::transient_provider()); diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index 498d1b82d..92904aa40 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -151,17 +151,13 @@ build_rpc_trait! { #[rpc(name = "parity_localTransactions")] fn local_transactions(&self) -> Result, Error>; - /// Returns current Trusted Signer port or an error if signer is disabled. 
- #[rpc(name = "parity_signerPort")] - fn signer_port(&self) -> Result; + /// Returns current Dapps Server interface and port or an error if dapps server is disabled. + #[rpc(name = "parity_dappsUrl")] + fn dapps_url(&self) -> Result; - /// Returns current Dapps Server port or an error if dapps server is disabled. - #[rpc(name = "parity_dappsPort")] - fn dapps_port(&self) -> Result; - - /// Returns current Dapps Server interface address or an error if dapps server is disabled. - #[rpc(name = "parity_dappsInterface")] - fn dapps_interface(&self) -> Result; + /// Returns current WS Server interface and port or an error if ws server is disabled. + #[rpc(name = "parity_wsUrl")] + fn ws_url(&self) -> Result; /// Returns next nonce for particular sender. Should include all transactions in the queue. #[rpc(async, name = "parity_nextNonce")] diff --git a/rpc/src/v1/traits/parity_set.rs b/rpc/src/v1/traits/parity_set.rs index b91b33574..1feb39718 100644 --- a/rpc/src/v1/traits/parity_set.rs +++ b/rpc/src/v1/traits/parity_set.rs @@ -19,7 +19,7 @@ use jsonrpc_core::Error; use futures::BoxFuture; -use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction}; +use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp}; build_rpc_trait! { /// Parity-specific rpc interface for operations altering the settings. @@ -96,6 +96,10 @@ build_rpc_trait! { #[rpc(async, name = "parity_hashContent")] fn hash_content(&self, String) -> BoxFuture; + /// Returns a list of local dapps + #[rpc(name = "parity_dappsList")] + fn dapps_list(&self) -> Result, Error>; + /// Is there a release ready for install? #[rpc(name = "parity_upgradeReady")] fn upgrade_ready(&self) -> Result, Error>; diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index 9a6d6837a..bbc8b350e 100644 --- a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -283,12 +283,15 @@ mod tests { nonce: Some(1.into()), condition: None, }), - origin: Origin::Signer(5.into()), + origin: Origin::Signer { + dapp: "http://parity.io".into(), + session: 5.into(), + } }; // when let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"#; + let expected = r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":{"dapp":"http://parity.io","session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#; // then assert_eq!(res.unwrap(), expected.to_owned()); diff --git a/rpc/src/v1/types/dapps.rs b/rpc/src/v1/types/dapps.rs new file mode 100644 index 000000000..fb4a868f0 --- /dev/null +++ b/rpc/src/v1/types/dapps.rs @@ -0,0 +1,57 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +/// Local Dapp +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct LocalDapp { + /// ID of local dapp + pub id: String, + /// Dapp name + pub name: String, + /// Dapp description + pub description: String, + /// Dapp version string + pub version: String, + /// Dapp author + pub author: String, + /// Dapp icon + #[serde(rename="iconUrl")] + pub icon_url: String, +} + +#[cfg(test)] +mod tests { + use serde_json; + use super::LocalDapp; + + #[test] + fn dapp_serialization() { + let s = r#"{"id":"skeleton","name":"Skeleton","description":"A skeleton dapp","version":"0.1","author":"Parity Technologies Ltd","iconUrl":"title.png"}"#; + + let dapp = LocalDapp { + id: "skeleton".into(), + name: "Skeleton".into(), + description: "A skeleton dapp".into(), + version: "0.1".into(), + author: "Parity Technologies Ltd".into(), + icon_url: "title.png".into(), + }; + + let serialized = serde_json::to_string(&dapp).unwrap(); + assert_eq!(serialized, s); + } +} diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index 97f2eb2ae..ef90d844b 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -24,6 +24,7 @@ mod bytes; mod call_request; mod confirmations; mod consensus_status; +mod dapps; mod derivation; mod filter; mod hash; @@ -55,6 +56,7 @@ pub use self::confirmations::{ TransactionModification, SignRequest, DecryptRequest, Either }; pub use self::consensus_status::*; +pub use self::dapps::LocalDapp; pub use self::derivation::{DeriveHash, DeriveHierarchical, Derive}; pub use self::filter::{Filter, FilterChanges}; pub use self::hash::{H64, H160, H256, H512, H520, H2048}; diff --git a/rpc/src/v1/types/provenance.rs b/rpc/src/v1/types/provenance.rs index 81201f8e4..b52f0cb77 100644 --- a/rpc/src/v1/types/provenance.rs +++ b/rpc/src/v1/types/provenance.rs @@ -33,12 +33,22 @@ pub enum Origin { /// IPC server (includes session hash) #[serde(rename="ipc")] Ipc(H256), - /// WS server (includes session hash) + /// WS server #[serde(rename="ws")] - Ws(H256), - /// Signer (includes session hash) + Ws { + /// Dapp id + dapp: DappId, + /// Session id + session: H256, + }, + /// Signer (authorized WS server) #[serde(rename="signer")] - Signer(H256), + Signer { + /// Dapp id + dapp: DappId, + /// Session id + session: H256 + }, /// Unknown #[serde(rename="unknown")] Unknown, @@ -53,11 +63,11 @@ impl Default for Origin { impl fmt::Display for Origin { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - Origin::Rpc(ref origin) => write!(f, "RPC (service: {})", origin), + Origin::Rpc(ref origin) => write!(f, "{} via RPC", origin), Origin::Dapps(ref origin) => write!(f, "Dapp {}", origin), Origin::Ipc(ref session) => write!(f, "IPC (session: {})", session), - Origin::Ws(ref session) => write!(f, "WebSocket (session: {})", session), - Origin::Signer(ref session) => write!(f, "UI (session: {})", session), + Origin::Ws { ref session, ref dapp } => write!(f, "{} via WebSocket (session: {})", dapp, session), + Origin::Signer { ref session, ref dapp } => write!(f, "{} via UI (session: {})", dapp, session), Origin::Unknown => write!(f, "unknown origin"), } } @@ -114,9 +124,15 @@ mod 
tests { let o1 = Origin::Rpc("test service".into()); let o2 = Origin::Dapps("http://parity.io".into()); let o3 = Origin::Ipc(5.into()); - let o4 = Origin::Signer(10.into()); + let o4 = Origin::Signer { + dapp: "http://parity.io".into(), + session: 10.into(), + }; let o5 = Origin::Unknown; - let o6 = Origin::Ws(5.into()); + let o6 = Origin::Ws { + dapp: "http://parity.io".into(), + session: 5.into(), + }; // when let res1 = serde_json::to_string(&o1).unwrap(); @@ -130,9 +146,9 @@ mod tests { assert_eq!(res1, r#"{"rpc":"test service"}"#); assert_eq!(res2, r#"{"dapp":"http://parity.io"}"#); assert_eq!(res3, r#"{"ipc":"0x0000000000000000000000000000000000000000000000000000000000000005"}"#); - assert_eq!(res4, r#"{"signer":"0x000000000000000000000000000000000000000000000000000000000000000a"}"#); + assert_eq!(res4, r#"{"signer":{"dapp":"http://parity.io","session":"0x000000000000000000000000000000000000000000000000000000000000000a"}}"#); assert_eq!(res5, r#""unknown""#); - assert_eq!(res6, r#"{"ws":"0x0000000000000000000000000000000000000000000000000000000000000005"}"#); + assert_eq!(res6, r#"{"ws":{"dapp":"http://parity.io","session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"#); } #[test] diff --git a/rpc_cli/src/lib.rs b/rpc_cli/src/lib.rs index 8d00405b5..23aa06f15 100644 --- a/rpc_cli/src/lib.rs +++ b/rpc_cli/src/lib.rs @@ -7,7 +7,7 @@ extern crate ethcore_bigint as bigint; extern crate parity_rpc as rpc; extern crate parity_rpc_client as client; -use rpc::v1::types::{U256, ConfirmationRequest}; +use rpc::signer::{U256, ConfirmationRequest}; use client::signer_client::SignerRpc; use std::io::{Write, BufRead, BufReader, stdout, stdin}; use std::path::PathBuf; diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index c4e68acef..6af9b4d25 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -14,8 +14,8 @@ serde = "0.9" serde_json = "0.9" tempdir = "0.3.5" url = "1.2.0" +matches = "0.1" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "parity-1.7" } +jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } parity-rpc = { path = "../rpc" } -ethcore-signer = { path = "../signer" } ethcore-util = { path = "../util" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs index 3ef7ad7ce..f31570c7a 100644 --- a/rpc_client/src/client.rs +++ b/rpc_client/src/client.rs @@ -13,7 +13,7 @@ use util::{Hashable, Mutex}; use url::Url; use std::fs::File; -use ws::{ +use ws::ws::{ self, Request, Handler, @@ -204,6 +204,7 @@ impl Rpc { let rpc = Self::connect(url, authpath).map(|rpc| rpc).wait()?; rpc } + /// Non-blocking, returns a future pub fn connect( url: &str, authpath: &PathBuf @@ -241,6 +242,7 @@ impl Rpc { } } } + /// Non-blocking, returns a future of the request response pub fn request( &mut self, method: &'static str, params: Vec diff --git a/rpc_client/src/lib.rs b/rpc_client/src/lib.rs index e2f53e606..d1967ccbd 100644 --- a/rpc_client/src/lib.rs +++ b/rpc_client/src/lib.rs @@ -1,34 +1,36 @@ pub mod client; pub mod signer_client; -extern crate ethcore_signer; extern crate ethcore_util as util; extern crate futures; extern crate jsonrpc_core; +extern crate jsonrpc_ws_server as ws; extern crate parity_rpc as rpc; extern crate rand; extern crate serde; extern crate serde_json; extern crate tempdir; extern crate url; -extern crate ws; #[macro_use] extern crate log; +#[cfg(test)] +#[macro_use] 
+extern crate matches; + + #[cfg(test)] mod tests { - #[macro_use] - extern crate matches; use futures::Future; use std::path::PathBuf; use client::{Rpc, RpcError}; - use ethcore_signer; + use rpc; #[test] fn test_connection_refused() { - let (_srv, port, mut authcodes) = ethcore_signer::tests::serve(); + let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); let _ = authcodes.generate_new(); authcodes.to_file(&authcodes.path).unwrap(); @@ -43,7 +45,7 @@ mod tests { #[test] fn test_authcode_fail() { - let (_srv, port, _) = ethcore_signer::tests::serve(); + let (_srv, port, _) = rpc::tests::ws::serve(); let path = PathBuf::from("nonexist"); let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &path); @@ -55,7 +57,7 @@ mod tests { #[test] fn test_authcode_correct() { - let (_srv, port, mut authcodes) = ethcore_signer::tests::serve(); + let (_srv, port, mut authcodes) = rpc::tests::ws::serve(); let _ = authcodes.generate_new(); authcodes.to_file(&authcodes.path).unwrap(); diff --git a/rpc_client/src/signer_client.rs b/rpc_client/src/signer_client.rs index 317dc0fec..ae051efb6 100644 --- a/rpc_client/src/signer_client.rs +++ b/rpc_client/src/signer_client.rs @@ -1,5 +1,5 @@ use client::{Rpc, RpcError}; -use rpc::v1::types::{ConfirmationRequest, TransactionModification, U256, TransactionCondition}; +use rpc::signer::{ConfirmationRequest, TransactionModification, U256, TransactionCondition}; use serde; use serde_json::{Value as JsonValue, to_value}; use std::path::PathBuf; @@ -13,11 +13,11 @@ impl SignerRpc { pub fn new(url: &str, authfile: &PathBuf) -> Result { Ok(SignerRpc { rpc: Rpc::new(&url, authfile)? }) } - pub fn requests_to_confirm(&mut self) -> - BoxFuture, RpcError>, Canceled> - { + + pub fn requests_to_confirm(&mut self) -> BoxFuture, RpcError>, Canceled> { self.rpc.request("signer_requestsToConfirm", vec![]) } + pub fn confirm_request( &mut self, id: U256, @@ -25,17 +25,15 @@ impl SignerRpc { new_gas_price: Option, new_condition: Option>, pwd: &str - ) -> BoxFuture, Canceled> - { + ) -> BoxFuture, Canceled> { self.rpc.request("signer_confirmRequest", vec![ Self::to_value(&format!("{:#x}", id)), Self::to_value(&TransactionModification { sender: None, gas_price: new_gas_price, gas: new_gas, condition: new_condition }), Self::to_value(&pwd), ]) } - pub fn reject_request(&mut self, id: U256) -> - BoxFuture, Canceled> - { + + pub fn reject_request(&mut self, id: U256) -> BoxFuture, Canceled> { self.rpc.request("signer_rejectRequest", vec![ JsonValue::String(format!("{:#x}", id)) ]) diff --git a/scripts/targets.sh b/scripts/targets.sh index f3ae6a2d5..fb10c43f2 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -7,7 +7,8 @@ export TARGETS=" -p ethcore-bigint\ -p parity-dapps \ -p parity-rpc \ - -p ethcore-signer \ + -p parity-rpc-client \ + -p rpc-cli \ -p ethcore-util \ -p ethcore-network \ -p ethcore-io \ diff --git a/signer/Cargo.toml b/signer/Cargo.toml deleted file mode 100644 index 4f7c0f179..000000000 --- a/signer/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -description = "Ethcore Trusted Signer" -homepage = "http://parity.io" -license = "GPL-3.0" -name = "ethcore-signer" -version = "1.7.0" -authors = ["Parity Technologies "] -build = "build.rs" - -[build-dependencies] -rustc_version = "0.1" - -[dependencies] -rand = "0.3.14" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -log = "0.3" -env_logger = "0.4" 
-ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "parity-1.7" } -parity-dapps-glue = { version = "1.7", optional = true } -ethcore-util = { path = "../util" } -ethcore-io = { path = "../util/io" } -parity-rpc = { path = "../rpc" } -ethcore-devtools = { path = "../devtools" } -parity-ui = { path = "../dapps/ui", version = "1.4", optional = true } - -clippy = { version = "0.0.103", optional = true} - -[features] -dev = ["clippy"] -ui = ["parity-dapps-glue", "parity-ui", "parity-ui/no-precompiled-js"] -ui-precompiled = ["parity-dapps-glue", "parity-ui", "parity-ui/use-precompiled-js"] diff --git a/signer/src/lib.rs b/signer/src/lib.rs deleted file mode 100644 index a1c935c5b..000000000 --- a/signer/src/lib.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -#![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] - -//! Signer module -//! -//! This module manages your private keys and accounts/identities -//! that can be used within Dapps. -//! -//! It exposes API (over `WebSockets`) accessed by Signer UIs. -//! Each transaction sent by Dapp is broadcasted to Signer UIs -//! and their responsibility is to confirm (or confirm and sign) -//! the transaction for you. -//! -//! ``` -//! extern crate jsonrpc_core; -//! extern crate jsonrpc_server_utils; -//! extern crate ethcore_signer; -//! extern crate parity_rpc; -//! -//! use std::sync::Arc; -//! use jsonrpc_core::IoHandler; -//! use jsonrpc_server_utils::reactor::RpcEventLoop; -//! use ethcore_signer::ServerBuilder; -//! use parity_rpc::ConfirmationsQueue; -//! -//! fn main() { -//! let queue = Arc::new(ConfirmationsQueue::default()); -//! let io = IoHandler::default(); -//! let event_loop = RpcEventLoop::spawn().unwrap(); -//! let remote = event_loop.remote(); -//! let _server = ServerBuilder::new(queue, "/tmp/authcodes".into()) -//! .start("127.0.0.1:8084".parse().unwrap(), io, remote); -//! } -//! ``` - -#[macro_use] -extern crate log; -extern crate env_logger; -extern crate rand; - -extern crate ethcore_io as io; -extern crate ethcore_util as util; -extern crate jsonrpc_core; -extern crate jsonrpc_server_utils; -extern crate parity_rpc as rpc; -extern crate ws; - -extern crate ethcore_devtools as devtools; - -mod authcode_store; -mod ws_server; - -/// Exported tests for use in signer RPC client testing -pub mod tests; -pub use authcode_store::*; -pub use ws_server::*; diff --git a/signer/src/ws_server/error_tpl.html b/signer/src/ws_server/error_tpl.html deleted file mode 100644 index 04a0f3c30..000000000 --- a/signer/src/ws_server/error_tpl.html +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - {meta} - {title} - - - -
-	{title}
-	{message}
-	{details}
-	{version}
- - diff --git a/signer/src/ws_server/mod.rs b/signer/src/ws_server/mod.rs deleted file mode 100644 index 7bff6cf19..000000000 --- a/signer/src/ws_server/mod.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! `WebSockets` server. - -use ws; -use std::default::Default; -use std::net::SocketAddr; -use std::ops::Drop; -use std::path::PathBuf; -use std::sync::Arc; -use std::thread; -use std; - -use io::{PanicHandler, OnPanicListener, MayPanic}; -use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; -use jsonrpc_server_utils::tokio_core::reactor::Remote; -use rpc::{ConfirmationsQueue}; -use rpc::informant::RpcStats; - -mod session; - -pub use self::session::MetaExtractor; - -/// Signer startup error -#[derive(Debug)] -pub enum ServerError { - /// Wrapped `std::io::Error` - IoError(std::io::Error), - /// Other `ws-rs` error - WebSocket(ws::Error) -} - -impl From for ServerError { - fn from(err: ws::Error) -> Self { - match err.kind { - ws::ErrorKind::Io(e) => ServerError::IoError(e), - _ => ServerError::WebSocket(err), - } - } -} - -/// Dummy metadata extractor -#[derive(Clone)] -pub struct NoopExtractor; -impl session::MetaExtractor for NoopExtractor {} - -/// Builder for `WebSockets` server -pub struct ServerBuilder { - queue: Arc, - authcodes_path: PathBuf, - skip_origin_validation: bool, - stats: Option>, -} - -impl ServerBuilder { - /// Creates new `ServerBuilder` - pub fn new(queue: Arc, authcodes_path: PathBuf) -> Self { - ServerBuilder { - queue: queue, - authcodes_path: authcodes_path, - skip_origin_validation: false, - stats: None, - } - } - - /// If set to `true` server will not verify Origin of incoming requests. - /// Not recommended. Use only for development. - pub fn skip_origin_validation(mut self, skip: bool) -> Self { - self.skip_origin_validation = skip; - self - } - - /// Configure statistic collection - pub fn stats(mut self, stats: Arc) -> Self { - self.stats = Some(stats); - self - } - - /// Starts a new `WebSocket` server in separate thread. - /// Returns a `Server` handle which closes the server when droped. - pub fn start, H: Into>>( - self, - addr: SocketAddr, - handler: H, - remote: Remote, - ) -> Result { - self.start_with_extractor(addr, handler, remote, NoopExtractor) - } - - /// Starts a new `WebSocket` server in separate thread. - /// Returns a `Server` handle which closes the server when droped. - pub fn start_with_extractor, H: Into>, T: session::MetaExtractor>( - self, - addr: SocketAddr, - handler: H, - remote: Remote, - meta_extractor: T, - ) -> Result { - Server::start( - addr, - handler.into(), - remote, - self.queue, - self.authcodes_path, - self.skip_origin_validation, - self.stats, - meta_extractor, - ) - } - -} - -/// `WebSockets` server implementation. 
-pub struct Server { - handle: Option>, - broadcaster: ws::Sender, - queue: Arc, - panic_handler: Arc, - addr: SocketAddr, -} - -impl Server { - /// Returns the address this server is listening on - pub fn addr(&self) -> &SocketAddr { - &self.addr - } - - /// Starts a new `WebSocket` server in separate thread. - /// Returns a `Server` handle which closes the server when droped. - fn start, T: session::MetaExtractor>( - addr: SocketAddr, - handler: MetaIoHandler, - remote: Remote, - queue: Arc, - authcodes_path: PathBuf, - skip_origin_validation: bool, - stats: Option>, - meta_extractor: T, - ) -> Result { - let config = { - let mut config = ws::Settings::default(); - // accept only handshakes beginning with GET - config.method_strict = true; - // Was shutting down server when suspending on linux: - config.shutdown_on_interrupt = false; - config - }; - - // Create WebSocket - let origin = format!("{}", addr); - let port = addr.port(); - let ws = ws::Builder::new().with_settings(config).build( - session::Factory::new(handler, remote, origin, port, authcodes_path, skip_origin_validation, stats, meta_extractor) - )?; - - let panic_handler = PanicHandler::new_in_arc(); - let ph = panic_handler.clone(); - let broadcaster = ws.broadcaster(); - - // Spawn a thread with event loop - let handle = thread::spawn(move || { - ph.catch_panic(move || { - match ws.listen(addr).map_err(ServerError::from) { - Err(ServerError::IoError(io)) => die(format!( - "Signer: Could not start listening on specified address. Make sure that no other instance is running on Signer's port. Details: {:?}", - io - )), - Err(any_error) => die(format!( - "Signer: Unknown error occurred when starting Signer. Details: {:?}", - any_error - )), - Ok(server) => server, - } - }).unwrap(); - }); - - // Return a handle - Ok(Server { - handle: Some(handle), - broadcaster: broadcaster, - queue: queue, - panic_handler: panic_handler, - addr: addr, - }) - } -} - -impl MayPanic for Server { - fn on_panic(&self, closure: F) where F: OnPanicListener { - self.panic_handler.on_panic(closure); - } -} - -impl Drop for Server { - fn drop(&mut self) { - self.queue.finish(); - self.broadcaster.shutdown().unwrap(); - self.handle.take().unwrap().join().unwrap(); - } -} - -fn die(msg: String) -> ! { - println!("ERROR: {}", msg); - std::process::exit(1); -} diff --git a/signer/src/ws_server/session.rs b/signer/src/ws_server/session.rs deleted file mode 100644 index 91984ff05..000000000 --- a/signer/src/ws_server/session.rs +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Session handlers factory. 
- -use std::path::{PathBuf, Path}; -use std::sync::Arc; -use std::str::FromStr; - -use authcode_store::AuthCodes; -use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; -use jsonrpc_core::futures::Future; -use jsonrpc_server_utils::tokio_core::reactor::Remote; -use rpc::informant::RpcStats; -use util::{H256, version}; -use ws; - -#[cfg(feature = "parity-ui")] -mod ui { - extern crate parity_ui as ui; - extern crate parity_dapps_glue as dapps; - - use self::dapps::WebApp; - - #[derive(Default)] - pub struct Handler { - ui: ui::App, - } - - impl Handler { - pub fn handle(&self, req: &str) -> Option<&dapps::File> { - let file = match req { - "" | "/" => "index.html", - path => &path[1..], - }; - self.ui.file(file) - } - } -} -#[cfg(not(feature = "parity-ui"))] -mod ui { - pub struct File { - pub content: &'static [u8], - pub content_type: &'static str, - } - - #[derive(Default)] - pub struct Handler; - - impl Handler { - pub fn handle(&self, _req: &str) -> Option<&File> { - None - } - } -} - -const HOME_DOMAIN: &'static str = "parity.web3.site"; - -fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool { - match header.map(|h| String::from_utf8_lossy(h).into_owned()) { - Some(ref origin) if origin.starts_with("chrome-extension://") => true, - Some(ref origin) if origin.starts_with(self_origin) => true, - Some(ref origin) if origin.starts_with(&format!("http://{}", self_origin)) => true, - Some(ref origin) if origin.starts_with(HOME_DOMAIN) => true, - Some(ref origin) if origin.starts_with(&format!("http://{}", HOME_DOMAIN)) => true, - _ => false, - } -} - -fn auth_token_hash(codes_path: &Path, protocols: ws::Result>) -> Option { - match protocols { - Ok(ref protocols) if protocols.len() == 1 => { - let protocol = protocols[0]; - let mut split = protocol.split('_'); - let auth = split.next().and_then(|v| H256::from_str(v).ok()); - let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok()); - - if let (Some(auth), Some(time)) = (auth, time) { - // Check if the code is valid - AuthCodes::from_file(codes_path) - .ok() - .and_then(|mut codes| { - // remove old tokens - codes.clear_garbage(); - - let res = codes.is_valid(&auth, time); - // make sure to save back authcodes - it might have been modified - if codes.to_file(codes_path).is_err() { - warn!(target: "signer", "Couldn't save authorization codes to file."); - } - - if res { - Some(auth) - } else { - None - } - }) - } else { - None - } - }, - _ => None, - } -} - -fn add_headers(mut response: ws::Response, mime: &str) -> ws::Response { - let content_len = format!("{}", response.len()); - { - let mut headers = response.headers_mut(); - headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec())); - headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec())); - headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec())); - headers.push(("Server".into(), b"Parity/SignerUI".to_vec())); - headers.push(("Content-Length".into(), content_len.as_bytes().to_vec())); - headers.push(("Content-Type".into(), mime.as_bytes().to_vec())); - headers.push(("Connection".into(), b"close".to_vec())); - } - response -} - -/// Metadata extractor from session data. 
-pub trait MetaExtractor: Send + Clone + 'static { - /// Extract metadata for given session - fn extract_metadata(&self, _session_id: &H256) -> M { - Default::default() - } -} - -pub struct Session, T> { - session_id: H256, - out: ws::Sender, - skip_origin_validation: bool, - self_origin: String, - self_port: u16, - authcodes_path: PathBuf, - handler: Arc>, - remote: Remote, - file_handler: Arc, - stats: Option>, - meta_extractor: T, -} - -impl, T> Drop for Session { - fn drop(&mut self) { - self.stats.as_ref().map(|stats| stats.close_session()); - } -} - -impl, T: MetaExtractor> ws::Handler for Session { - fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> { - trace!(target: "signer", "Handling request: {:?}", req); - - // TODO [ToDr] ws server is not handling proxied requests correctly: - // Trim domain name from resource part: - let resource = req.resource().trim_left_matches(&format!("http://{}:{}", HOME_DOMAIN, self.self_port)); - let resource = resource.trim_left_matches(&format!("http://{}", HOME_DOMAIN)); - - // Styles file is allowed for error pages to display nicely. - let is_styles_file = resource == "/styles.css"; - - // Check request origin and host header. - if !self.skip_origin_validation { - let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]); - let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]); - - let is_valid = origin_is_allowed(&self.self_origin, origin) || (origin.is_none() && origin_is_allowed(&self.self_origin, host)); - let is_valid = is_styles_file || is_valid; - - if !is_valid { - warn!(target: "signer", "Blocked connection to Signer API from untrusted origin."); - return Ok(error( - ErrorType::Forbidden, - "URL Blocked", - "You are not allowed to access Trusted Signer using this URL.", - Some(&format!("Use: http://{}", self.self_origin)), - )); - } - } - - // PROXY requests when running behind home.parity - if req.method() == "CONNECT" { - let mut res = ws::Response::ok("".into()); - res.headers_mut().push(("Content-Length".into(), b"0".to_vec())); - res.headers_mut().push(("Connection".into(), b"keep-alive".to_vec())); - return Ok(res); - } - - // Detect if it's a websocket request - // (styles file skips origin validation, so make sure to prevent WS connections on this resource) - if req.header("sec-websocket-key").is_some() && !is_styles_file { - // Check authorization - let auth_token_hash = auth_token_hash(&self.authcodes_path, req.protocols()); - match auth_token_hash { - None => { - info!(target: "signer", "Unauthorized connection to Signer API blocked."); - return Ok(error(ErrorType::Forbidden, "Not Authorized", "Request to this API was not authorized.", None)); - }, - Some(auth) => { - self.session_id = auth; - }, - } - - let protocols = req.protocols().expect("Existence checked by authorization."); - let protocol = protocols.get(0).expect("Proved by authorization."); - return ws::Response::from_request(req).map(|mut res| { - // To make WebSockets connection successful we need to send back the protocol header. - res.set_protocol(protocol); - res - }); - } - - debug!(target: "signer", "Requesting resource: {:?}", resource); - // Otherwise try to serve a page. 
- Ok(self.file_handler.handle(resource) - .map_or_else( - // return 404 not found - || error(ErrorType::NotFound, "Not found", "Requested file was not found.", None), - // or serve the file - |f| add_headers(ws::Response::ok_raw(f.content.to_vec()), f.content_type) - )) - } - - fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> { - let req = msg.as_text()?; - let out = self.out.clone(); - // TODO [ToDr] Move to on_connect - let metadata = self.meta_extractor.extract_metadata(&self.session_id); - - let future = self.handler.handle_request(req, metadata).map(move |response| { - if let Some(result) = response { - let res = out.send(result); - if let Err(e) = res { - warn!(target: "signer", "Error while sending response: {:?}", e); - } - } - }); - self.remote.spawn(move |_| future); - Ok(()) - } -} - -pub struct Factory, T> { - handler: Arc>, - remote: Remote, - skip_origin_validation: bool, - self_origin: String, - self_port: u16, - authcodes_path: PathBuf, - meta_extractor: T, - file_handler: Arc, - stats: Option>, -} - -impl, T> Factory { - pub fn new( - handler: MetaIoHandler, - remote: Remote, - self_origin: String, - self_port: u16, - authcodes_path: PathBuf, - skip_origin_validation: bool, - stats: Option>, - meta_extractor: T, - ) -> Self { - Factory { - handler: Arc::new(handler), - remote: remote, - skip_origin_validation: skip_origin_validation, - self_origin: self_origin, - self_port: self_port, - authcodes_path: authcodes_path, - meta_extractor: meta_extractor, - file_handler: Arc::new(ui::Handler::default()), - stats: stats, - } - } -} - -impl, T: MetaExtractor> ws::Factory for Factory { - type Handler = Session; - - fn connection_made(&mut self, sender: ws::Sender) -> Self::Handler { - self.stats.as_ref().map(|stats| stats.open_session()); - - Session { - session_id: 0.into(), - out: sender, - handler: self.handler.clone(), - remote: self.remote.clone(), - skip_origin_validation: self.skip_origin_validation, - self_origin: self.self_origin.clone(), - self_port: self.self_port, - authcodes_path: self.authcodes_path.clone(), - meta_extractor: self.meta_extractor.clone(), - file_handler: self.file_handler.clone(), - stats: self.stats.clone(), - } - } -} - -enum ErrorType { - NotFound, - Forbidden, -} - -fn error(error: ErrorType, title: &str, message: &str, details: Option<&str>) -> ws::Response { - let content = format!( - include_str!("./error_tpl.html"), - title=title, - meta="", - message=message, - details=details.unwrap_or(""), - version=version(), - ); - let res = match error { - ErrorType::NotFound => ws::Response::not_found(content), - ErrorType::Forbidden => ws::Response::forbidden(content), - }; - add_headers(res, "text/html") -} From e1fef5c73295aa070846ac4bef93a4afea6b441a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 24 May 2017 13:31:33 +0300 Subject: [PATCH 28/29] Update dependencies and bigint api (#5685) * update to latest bigint * bump elastic array and deps * fix rlp tests * also update all smallvec deps * fix doc test * reduce parking in attempt to fix CI bug * fix from/into electum bug * remove duplicate imports --- Cargo.lock | 56 +++++++++---------- ethcore/light/Cargo.toml | 2 +- ethcore/light/src/net/load_timer.rs | 2 +- ethcore/native_contracts/generator/src/lib.rs | 2 +- ethcore/src/action_params.rs | 2 +- ethcore/src/block.rs | 2 +- ethcore/src/builtin.rs | 2 +- ethcore/src/client/client.rs | 2 +- ethcore/src/engines/tendermint/params.rs | 2 +- ethcore/src/evm/evm.rs | 8 +-- ethcore/src/evm/factory.rs | 2 +- 
ethcore/src/evm/interpreter/memory.rs | 2 +- ethcore/src/executive.rs | 2 +- ethcore/src/miner/banning_queue.rs | 4 +- ethcore/src/miner/miner.rs | 2 +- .../src/miner/service_transaction_checker.rs | 2 +- ethcore/src/miner/transaction_queue.rs | 4 +- ethcore/src/snapshot/mod.rs | 2 +- ethcore/src/spec/genesis.rs | 2 +- ethcore/src/types/account_diff.rs | 2 +- ethcore/src/types/transaction.rs | 2 +- ethcrypto/Cargo.toml | 2 +- ethkey/Cargo.toml | 2 +- ethkey/src/extended.rs | 2 +- ethstore/Cargo.toml | 4 +- evmbin/benches/mod.rs | 2 +- evmbin/src/main.rs | 2 +- hw/Cargo.toml | 2 +- hw/src/ledger.rs | 2 +- hw/src/lib.rs | 2 +- json/src/uint.rs | 2 +- parity/blockchain.rs | 2 +- parity/configuration.rs | 2 +- parity/helpers.rs | 2 +- parity/informant.rs | 2 +- rpc/src/v1/helpers/dispatch.rs | 2 +- rpc/src/v1/helpers/light_fetch.rs | 2 +- rpc/src/v1/impls/light/eth.rs | 2 +- rpc/src/v1/impls/personal.rs | 2 +- rpc/src/v1/tests/eth.rs | 2 +- rpc/src/v1/tests/helpers/miner_service.rs | 2 +- rpc/src/v1/tests/mocked/eth.rs | 2 +- rpc/src/v1/tests/mocked/personal.rs | 2 +- rpc/src/v1/tests/mocked/signer.rs | 2 +- rpc/src/v1/tests/mocked/signing.rs | 5 +- rpc/src/v1/types/trace.rs | 1 - rpc/src/v1/types/uint.rs | 2 +- rpc_cli/Cargo.toml | 2 +- rpc_cli/src/lib.rs | 2 +- scripts/contractABI.js | 1 - sync/Cargo.toml | 4 +- sync/src/chain.rs | 2 +- updater/src/operations.rs | 2 +- util/Cargo.toml | 6 +- util/benches/bigint.rs | 2 +- util/bigint/Cargo.toml | 4 +- util/bigint/src/hash.rs | 2 +- util/rlp/Cargo.toml | 4 +- util/rlp/benches/rlp.rs | 2 +- util/rlp/src/impls.rs | 2 +- 60 files changed, 96 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b63349f77..88edf74e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -69,11 +69,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bigint" -version = "1.0.5" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -317,10 +317,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "elastic-array" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -416,10 +416,9 @@ dependencies = [ [[package]] name = "ethcore-bigint" version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bigint 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ 
-529,7 +528,7 @@ dependencies = [ "rlp 0.2.0", "serde 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -636,14 +635,14 @@ version = "1.7.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "elastic-array 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.7.0", "ethcore-logger 1.7.0", - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -670,7 +669,7 @@ name = "ethcrypto" version = "0.1.0" dependencies = [ "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethkey 0.2.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -695,7 +694,7 @@ dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", @@ -708,7 +707,7 @@ name = "ethstore" version = "0.1.0" dependencies = [ "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -722,7 +721,7 @@ dependencies = [ "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -744,13 +743,13 @@ 
dependencies = [ "ethcore-network 1.7.0", "ethcore-util 1.7.0", "ethkey 0.2.0", - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -864,7 +863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "hardware-wallet" version = "1.7.0" dependencies = [ - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethkey 0.2.0", "hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)", "libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)", @@ -875,7 +874,7 @@ dependencies = [ [[package]] name = "heapsize" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2092,8 +2091,8 @@ name = "rlp" version = "0.2.0" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "elastic-array 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2154,7 +2153,7 @@ dependencies = [ name = "rpc-cli" version = "1.4.0" dependencies = [ - "ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bigint 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.7.0", @@ -2386,10 +2385,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "smallvec" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2863,7 +2862,7 @@ dependencies = [ "checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0" "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1" "checksum base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9605ba46d61df0410d8ac686b0007add8172eba90e8e909c347856fe794d8c" -"checksum bigint 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "5d1b3ef6756498df0e2c6bb67c065f4154d0ecd721eb5b3c3f865c8012b9fd74" +"checksum bigint 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d493e6869ed11b135750f4a4f44d574a52bf8f67e656cdc15b4085316c2098b6" 
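[Editor's note] These Cargo.lock hunks record two coordinated moves: the ethcore-bigint 0.1.3 entries lose their crates.io source and checksum lines because the workspace now builds the crate from util/bigint, and heapsize is bumped from 0.3.6 to 0.4.0 everywhere it appears, which is what pulls elastic-array up to 0.8 and smallvec up to 0.4 as well. For orientation, a minimal sketch of the HeapSizeOf surface these crates consume; it assumes the heapsize 0.4 trait keeps the same downstream shape as 0.3, and the struct and sizes are illustrative only:

    // Hedged sketch: HeapSizeOf as used by the bumped crates (shape assumed unchanged in 0.4).
    extern crate heapsize;
    use heapsize::HeapSizeOf;

    struct CachedBlock {
        bytes: Vec<u8>, // heap-allocated payload we want accounted for
    }

    impl HeapSizeOf for CachedBlock {
        fn heap_size_of_children(&self) -> usize {
            // Count only heap allocations owned by the value, not the struct itself.
            self.bytes.heap_size_of_children()
        }
    }

    fn main() {
        let block = CachedBlock { bytes: vec![0u8; 1024] };
        // The allocator may round the allocation up, so compare with >=.
        assert!(block.heap_size_of_children() >= 1024);
    }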
"checksum bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e103c8b299b28a9c6990458b7013dc4a8356a9b854c51b9883241f5866fac36e" "checksum bit-set 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6e1e6fb1c9e3d6fcdec57216a74eaa03e41f52a22f13a16438251d8e88b89da" "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c" @@ -2895,11 +2894,10 @@ dependencies = [ "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" -"checksum elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71a64decd4b8cd06654a4e643c45cb558ad554abbffd82a7e16e34f45f51b605" +"checksum elastic-array 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "561b1b1bb58e6d9212b75a28cca442f8a87cceb35cb1b6d6f39f5df5346a9160" "checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83" "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "" "checksum ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63df67d0af5e3cb906b667ca1a6e00baffbed87d0d8f5f78468a1f5eb3a66345" -"checksum ethcore-bigint 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5d237300af825a8d78f4c0dc835b0eab76a207e9df4aa088d91e162a173e0ca0" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" @@ -2910,7 +2908,7 @@ dependencies = [ "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90d069fe6beb9be359ef505650b3f73228c5591a3c4b1f32be2f4f44459ffa3a" "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1" -"checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c" +"checksum heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4c7593b1522161003928c959c20a2ca421c68e940d63d75573316a009e48a6d4" "checksum heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f807d2f64cc044a6bcf250ff23e59be0deec7a16612c014f962a06fa7e020f9" "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" @@ -3045,7 +3043,7 @@ dependencies = [ "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410" "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" -"checksum smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dca03f2f42500a9ef8ac0d16183dff8bed40e3dcf98f9d4147928548d5c4236e" +"checksum smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2e40af10aafe98b4d8294ae8388d8a5cd0707c65d364872efe72d063ec44bee0" "checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf" "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 1f8d48ac0..ab45eedbe 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -20,7 +20,7 @@ ethcore-ipc = { path = "../../ipc/rpc", optional = true } ethcore-devtools = { path = "../../devtools" } rlp = { path = "../../util/rlp" } time = "0.1" -smallvec = "0.3.1" +smallvec = "0.4" futures = "0.1" rand = "0.3" itertools = "0.5" diff --git a/ethcore/light/src/net/load_timer.rs b/ethcore/light/src/net/load_timer.rs index 190747d40..9c6afc7cc 100644 --- a/ethcore/light/src/net/load_timer.rs +++ b/ethcore/light/src/net/load_timer.rs @@ -32,7 +32,7 @@ use request::{CompleteRequest, Kind}; use bincode; use time; -use util::{Uint, RwLock, Mutex}; +use util::{RwLock, Mutex}; /// Number of time periods samples should be kept for. pub const MOVING_SAMPLE_SIZE: usize = 256; diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index 41d5319bc..540d2ec2b 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -48,7 +48,7 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result { use byteorder::{{BigEndian, ByteOrder}}; use futures::{{future, Future, IntoFuture, BoxFuture}}; use ethabi::{{Contract, Interface, Token, Event}}; -use util::{{self, Uint}}; +use util; /// Generated Rust bindings to an Ethereum contract. #[derive(Clone, Debug)] diff --git a/ethcore/src/action_params.rs b/ethcore/src/action_params.rs index ef9c00523..9ff86418e 100644 --- a/ethcore/src/action_params.rs +++ b/ethcore/src/action_params.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . //! Evm input params. 
-use util::{Address, Bytes, Uint, U256}; +use util::{Address, Bytes, U256}; use util::hash::{H256}; use util::sha3::{Hashable, SHA3_EMPTY}; use ethjson; diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index a983b229a..e19a7e27b 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use std::collections::HashSet; use rlp::{UntrustedRlp, RlpStream, Encodable, Decodable, DecoderError}; -use util::{Bytes, Address, Uint, Hashable, U256, H256, ordered_trie_root, SHA3_NULL_RLP}; +use util::{Bytes, Address, Hashable, U256, H256, ordered_trie_root, SHA3_NULL_RLP}; use util::error::{Mismatch, OutOfBounds}; use basic_types::{LogBloom, Seal}; diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index 12d4d9ac1..d567701d3 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -23,7 +23,7 @@ use crypto::ripemd160::Ripemd160 as Ripemd160Digest; use crypto::digest::Digest; use num::{BigUint, Zero, One}; -use util::{U256, H256, Uint, Hashable, BytesRef}; +use util::{U256, H256, Hashable, BytesRef}; use ethkey::{Signature, recover as ec_recover}; use ethjson; diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index f48d94cff..5c43cee35 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -25,7 +25,7 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable}; use util::{journaldb, DBValue, TrieFactory, Trie}; -use util::{U256, H256, Address, H2048, Uint}; +use util::{U256, H256, Address, H2048}; use util::trie::TrieSpec; use util::kvdb::*; diff --git a/ethcore/src/engines/tendermint/params.rs b/ethcore/src/engines/tendermint/params.rs index 34c743c49..16b47a0fb 100644 --- a/ethcore/src/engines/tendermint/params.rs +++ b/ethcore/src/engines/tendermint/params.rs @@ -17,7 +17,7 @@ //! Tendermint specific parameters. use ethjson; -use util::{U256, Uint, Address}; +use util::{U256, Address}; use time::Duration; use super::super::validator_set::{ValidatorSet, new_validator_set}; use super::super::transition::Timeouts; diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs index e03ded071..06be23ea5 100644 --- a/ethcore/src/evm/evm.rs +++ b/ethcore/src/evm/evm.rs @@ -17,7 +17,7 @@ //! Evm interface. 
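[Editor's note] The evm.rs hunk just below is the one place where call syntax changes rather than only an import: the UFCS trait calls in the CostType impl for U256 become plain method calls. A standalone, hedged sketch of that before/after pattern, again assuming the bigint 2.0 inherent methods:

    extern crate ethcore_util as util;
    use util::U256;

    // previously written as: Uint::overflowing_add(a, b)
    fn overflow_add(a: U256, b: U256) -> (U256, bool) {
        a.overflowing_add(b)
    }

    fn main() {
        let (wrapped, overflowed) = overflow_add(U256::max_value(), U256::from(1u64));
        assert!(overflowed);                     // max + 1 wraps around
        assert_eq!(wrapped, U256::from(0u64));
    }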
use std::{ops, cmp, fmt}; -use util::{U128, U256, U512, Uint, trie}; +use util::{U128, U256, U512, trie}; use action_params::ActionParams; use evm::Ext; use builtin; @@ -181,11 +181,11 @@ impl CostType for U256 { } fn overflow_add(self, other: Self) -> (Self, bool) { - Uint::overflowing_add(self, other) + self.overflowing_add(other) } fn overflow_mul(self, other: Self) -> (Self, bool) { - Uint::overflowing_mul(self, other) + self.overflowing_mul(other) } fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) { @@ -250,7 +250,7 @@ pub trait Evm { #[cfg(test)] mod tests { - use util::{U256, Uint}; + use util::U256; use super::CostType; #[test] diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 8efb67cbd..fc16b1955 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -20,7 +20,7 @@ use std::fmt; use std::sync::Arc; use evm::Evm; -use util::{U256, Uint}; +use util::U256; use super::interpreter::SharedCache; #[derive(Debug, PartialEq, Clone)] diff --git a/ethcore/src/evm/interpreter/memory.rs b/ethcore/src/evm/interpreter/memory.rs index 8a738f2f1..39c0d8025 100644 --- a/ethcore/src/evm/interpreter/memory.rs +++ b/ethcore/src/evm/interpreter/memory.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::{U256, Uint}; +use util::U256; pub trait Memory { /// Retrieve current size of the memory diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 33f7f59d5..dd7abc6b5 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -561,7 +561,7 @@ mod tests { use std::sync::Arc; use ethkey::{Generator, Random}; use super::*; - use util::{H256, U256, U512, Address, Uint, FromHex, FromStr}; + use util::{H256, U256, U512, Address, FromHex, FromStr}; use util::bytes::BytesRef; use action_params::{ActionParams, ActionValue}; use env_info::EnvInfo; diff --git a/ethcore/src/miner/banning_queue.rs b/ethcore/src/miner/banning_queue.rs index a77008ba6..20b46d760 100644 --- a/ethcore/src/miner/banning_queue.rs +++ b/ethcore/src/miner/banning_queue.rs @@ -24,7 +24,7 @@ use transient_hashmap::TransientHashMap; use miner::{TransactionQueue, TransactionQueueDetailsProvider, TransactionImportResult, TransactionOrigin}; use miner::transaction_queue::QueuingInstant; use error::{Error, TransactionError}; -use util::{Uint, U256, H256, Address, Hashable}; +use util::{U256, H256, Address, Hashable}; type Count = u16; @@ -215,7 +215,7 @@ mod tests { use error::{Error, TransactionError}; use client::TransactionImportResult; use miner::{TransactionQueue, TransactionOrigin}; - use util::{Uint, U256, Address, FromHex, Hashable}; + use util::{U256, Address, FromHex, Hashable}; use miner::transaction_queue::test::DummyTransactionDetailsProvider; fn queue() -> BanningTransactionQueue { diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 38e97f683..101f26244 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -1261,7 +1261,7 @@ mod tests { use super::super::{MinerService, PrioritizationStrategy}; use super::*; use block::IsBlock; - use util::{U256, Uint, FromHex}; + use util::{U256, FromHex}; use ethkey::{Generator, Random}; use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult}; use header::BlockNumber; diff --git a/ethcore/src/miner/service_transaction_checker.rs b/ethcore/src/miner/service_transaction_checker.rs index a0a75647f..d21643772 100644 --- 
a/ethcore/src/miner/service_transaction_checker.rs +++ b/ethcore/src/miner/service_transaction_checker.rs @@ -20,7 +20,7 @@ use types::ids::BlockId; use futures::{future, Future}; use native_contracts::ServiceTransactionChecker as Contract; -use util::{U256, Uint, Mutex}; +use util::{U256, Mutex}; const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 4ae10bece..03a2d37ad 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -29,7 +29,7 @@ //! extern crate ethkey; //! extern crate rustc_serialize; //! -//! use util::{Uint, U256, Address}; +//! use util::{U256, Address}; //! use ethkey::{Random, Generator}; //! use ethcore::miner::{TransactionQueue, RemovalReason, TransactionQueueDetailsProvider, AccountDetails, TransactionOrigin}; //! use ethcore::transaction::*; @@ -105,7 +105,7 @@ use std::cmp::Ordering; use std::cmp; use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap}; use linked_hash_map::LinkedHashMap; -use util::{Address, H256, Uint, U256}; +use util::{Address, H256, U256}; use util::table::Table; use transaction::*; use error::{Error, TransactionError}; diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 36c50f227..21317b551 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -29,7 +29,7 @@ use engines::Engine; use header::Header; use ids::BlockId; -use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint}; +use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256}; use util::Mutex; use util::hash::{H256}; use util::journaldb::{self, Algorithm, JournalDB}; diff --git a/ethcore/src/spec/genesis.rs b/ethcore/src/spec/genesis.rs index bf5ec46c1..b7db9aa90 100644 --- a/ethcore/src/spec/genesis.rs +++ b/ethcore/src/spec/genesis.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
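[Editor's note] The manifest changes that follow (ethcrypto, ethkey, ethstore, hw, and later util itself) switch ethcore-bigint from a pinned crates.io version to the in-tree copy under util/bigint, and hw/src/lib.rs renames the crate on import so module paths stay short. A hedged sketch of that rename pattern; the H256 helpers shown are assumed to behave as in the in-tree hash module:

    // The package is still ethcore-bigint (now a path dependency); only the local alias changes.
    extern crate ethcore_bigint as bigint;

    use bigint::hash::H256;

    fn main() {
        let zero = H256::default();
        assert_eq!(zero, H256::from(0u64)); // conversions are untouched by the rename
    }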
-use util::{Address, H256, Uint, U256}; +use util::{Address, H256, U256}; use util::sha3::SHA3_NULL_RLP; use ethjson; use super::seal::Seal; diff --git a/ethcore/src/types/account_diff.rs b/ethcore/src/types/account_diff.rs index fe41af8ee..4ed0f5ee9 100644 --- a/ethcore/src/types/account_diff.rs +++ b/ethcore/src/types/account_diff.rs @@ -19,7 +19,7 @@ use std::cmp::*; use std::fmt; use std::collections::BTreeMap; -use util::{U256, H256, Uint, Bytes}; +use util::{U256, H256, Bytes}; use ipc::binary::BinaryConvertable; #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index 2b17cee25..547be3136 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -19,7 +19,7 @@ use std::ops::Deref; use rlp::*; use util::sha3::Hashable; -use util::{H256, Address, U256, Bytes, HeapSizeOf, Uint}; +use util::{H256, Address, U256, Bytes, HeapSizeOf}; use ethkey::{Signature, Secret, Public, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; diff --git a/ethcrypto/Cargo.toml b/ethcrypto/Cargo.toml index 97fee67c6..84f905804 100644 --- a/ethcrypto/Cargo.toml +++ b/ethcrypto/Cargo.toml @@ -8,5 +8,5 @@ rust-crypto = "0.2.36" tiny-keccak = "1.0" eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } ethkey = { path = "../ethkey" } -ethcore-bigint = "0.1.2" +ethcore-bigint = { path = "../util/bigint" } diff --git a/ethkey/Cargo.toml b/ethkey/Cargo.toml index 6c54804f4..8254e194a 100644 --- a/ethkey/Cargo.toml +++ b/ethkey/Cargo.toml @@ -10,7 +10,7 @@ tiny-keccak = "1.0" eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } rustc-serialize = "0.3" docopt = { version = "0.7", optional = true } -ethcore-bigint = "0.1.2" +ethcore-bigint = { path = "../util/bigint" } rust-crypto = "0.2" byteorder = "1.0" diff --git a/ethkey/src/extended.rs b/ethkey/src/extended.rs index 7df6fde1c..45ed0547f 100644 --- a/ethkey/src/extended.rs +++ b/ethkey/src/extended.rs @@ -212,7 +212,7 @@ mod derivation { use rcrypto::mac::Mac; use rcrypto::sha2::Sha512; use bigint::hash::{H512, H256}; - use bigint::prelude::{U256, U512, Uint}; + use bigint::prelude::{U256, U512}; use secp256k1::key::{SecretKey, PublicKey}; use SECP256K1; use keccak; diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index 174a9e3dc..4c6ece5f5 100755 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -19,8 +19,8 @@ time = "0.1.34" itertools = "0.5" parking_lot = "0.4" ethcrypto = { path = "../ethcrypto" } -ethcore-bigint = "0.1.2" -smallvec = "0.3.1" +ethcore-bigint = { path = "../util/bigint" } +smallvec = "0.4" parity-wordlist = "1.0" tempdir = "0.3" diff --git a/evmbin/benches/mod.rs b/evmbin/benches/mod.rs index 5f21d2ebc..7ddc50681 100644 --- a/evmbin/benches/mod.rs +++ b/evmbin/benches/mod.rs @@ -32,7 +32,7 @@ use self::test::{Bencher, black_box}; use evm::run_vm; use ethcore::action_params::ActionParams; -use ethcore_util::{U256, Uint}; +use ethcore_util::U256; use rustc_serialize::hex::FromHex; #[bench] diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 02513eebd..505eae86e 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -30,7 +30,7 @@ use std::time::{Instant, Duration}; use std::fmt; use std::str::FromStr; use docopt::Docopt; -use util::{U256, FromHex, Uint, Bytes}; +use util::{U256, FromHex, Bytes}; use ethcore::evm::{self, Factory, VMType, Finalize}; use ethcore::action_params::ActionParams; diff --git a/hw/Cargo.toml b/hw/Cargo.toml index cd0124379..ca236d130 
100644 --- a/hw/Cargo.toml +++ b/hw/Cargo.toml @@ -12,7 +12,7 @@ parking_lot = "0.4" hidapi = { git = "https://github.com/paritytech/hidapi-rs" } libusb = { git = "https://github.com/paritytech/libusb-rs" } ethkey = { path = "../ethkey" } -ethcore-bigint = "0.1.2" +ethcore-bigint = { path = "../util/bigint" } [dev-dependencies] rustc-serialize = "0.3" diff --git a/hw/src/ledger.rs b/hw/src/ledger.rs index bc449e251..2f209afa5 100644 --- a/hw/src/ledger.rs +++ b/hw/src/ledger.rs @@ -24,7 +24,7 @@ use std::str::FromStr; use std::time::Duration; use super::WalletInfo; use ethkey::{Address, Signature}; -use ethcore_bigint::hash::H256; +use bigint::hash::H256; const LEDGER_VID: u16 = 0x2c97; const LEDGER_PIDS: [u16; 2] = [0x0000, 0x0001]; // Nano S and Blue diff --git a/hw/src/lib.rs b/hw/src/lib.rs index c364fd015..a35607305 100644 --- a/hw/src/lib.rs +++ b/hw/src/lib.rs @@ -20,7 +20,7 @@ extern crate parking_lot; extern crate hidapi; extern crate libusb; extern crate ethkey; -extern crate ethcore_bigint; +extern crate ethcore_bigint as bigint; #[macro_use] extern crate log; #[cfg(test)] extern crate rustc_serialize; diff --git a/json/src/uint.rs b/json/src/uint.rs index 6b206b380..d408b76dc 100644 --- a/json/src/uint.rs +++ b/json/src/uint.rs @@ -20,7 +20,7 @@ use std::fmt; use std::str::FromStr; use serde::{Deserialize, Deserializer}; use serde::de::{Error, Visitor}; -use util::{U256, Uint as U}; +use util::U256; /// Lenient uint json deserialization for test json files. #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)] diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 757f303dd..84af882ca 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -22,7 +22,7 @@ use std::thread::sleep; use std::sync::Arc; use rustc_serialize::hex::FromHex; use io::{PanicHandler, ForwardPanic}; -use util::{ToPretty, Uint, U256, H256, Address, Hashable}; +use util::{ToPretty, U256, H256, Address, Hashable}; use rlp::PayloadInfo; use ethcore::service::ClientService; use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockId}; diff --git a/parity/configuration.rs b/parity/configuration.rs index ad1353d09..6ac7e3e99 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -21,7 +21,7 @@ use std::path::{Path, PathBuf}; use std::collections::BTreeMap; use std::cmp::max; use cli::{Args, ArgsError}; -use util::{Hashable, H256, U256, Uint, Bytes, version_data, Address}; +use util::{Hashable, H256, U256, Bytes, version_data, Address}; use util::journaldb::Algorithm; use util::Colour; use ethsync::{NetworkConfiguration, is_valid_node_url, AllowIP}; diff --git a/parity/helpers.rs b/parity/helpers.rs index 449d8f569..31129f18b 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -18,7 +18,7 @@ use std::{io, env}; use std::io::{Write, BufReader, BufRead}; use std::time::Duration; use std::fs::File; -use util::{clean_0x, U256, Uint, Address, CompactionProfile}; +use util::{clean_0x, U256, Address, CompactionProfile}; use util::journaldb::Algorithm; use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType}; use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy}; diff --git a/parity/informant.rs b/parity/informant.rs index 6b21eefc0..1f309848c 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -24,7 +24,7 @@ use std::time::{Instant, Duration}; use io::{TimerToken, IoContext, IoHandler}; use isatty::{stdout_isatty}; use ethsync::{SyncProvider, 
ManageNetwork}; -use util::{Uint, RwLock, Mutex, H256, Colour, Bytes}; +use util::{RwLock, Mutex, H256, Colour, Bytes}; use ethcore::client::*; use ethcore::service::ClientIoMessage; use ethcore::snapshot::service::Service as SnapshotService; diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index efd118967..31223c77e 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -26,7 +26,7 @@ use light::client::LightChainClient; use light::on_demand::{request, OnDemand}; use light::TransactionQueue as LightTransactionQueue; use rlp; -use util::{Address, H520, H256, U256, Uint, Bytes, Mutex, RwLock}; +use util::{Address, H520, H256, U256, Bytes, Mutex, RwLock}; use util::sha3::Hashable; use stats::Corpus; diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 8bcd5115a..6f8e29171 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -35,7 +35,7 @@ use light::on_demand::{request, OnDemand, HeaderRef, Request as OnDemandRequest, use light::request::Field; use ethsync::LightSync; -use util::{Address, Mutex, Uint, U256}; +use util::{Address, Mutex, U256}; use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; use v1::types::{BlockNumber, CallRequest}; diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index fc61b0605..ca1157206 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -39,7 +39,7 @@ use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransact use ethsync::LightSync; use rlp::UntrustedRlp; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use util::{RwLock, Mutex, Uint, U256}; +use util::{RwLock, Mutex, U256}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 6e2be5ecc..84c29399c 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -20,7 +20,7 @@ use std::sync::{Arc, Weak}; use ethcore::account_provider::AccountProvider; use ethcore::transaction::PendingTransaction; -use util::{Address, U128, Uint, ToPretty}; +use util::{Address, U128, ToPretty}; use futures::{future, Future, BoxFuture}; use jsonrpc_core::Error; diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index c2093a51b..cf5bdd88b 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -28,7 +28,7 @@ use ethcore::miner::{MinerOptions, Banning, GasPricer, MinerService, ExternalMin use ethcore::account_provider::AccountProvider; use ethjson::blockchain::BlockChain; use io::IoChannel; -use util::{U256, H256, Uint, Address, Hashable}; +use util::{U256, H256, Address, Hashable}; use jsonrpc_core::IoHandler; use v1::impls::{EthClient, SigningUnsafeClient}; diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 4cdfbbd8c..1bf14da7c 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -17,7 +17,7 @@ //! Test implementation of miner service. 
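[Editor's note] Besides the import cleanups, the mocked signing test further below gets a small behavioural tweak: the helper thread that waits for the request to reach the signer queue now sleeps 10ms per iteration instead of 100ms, so the sleep is only a poll interval and the test converges faster. A hedged, self-contained sketch of that polling shape, with a stand-in flag in place of the real signer queue:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;
    use std::time::Duration;

    fn main() {
        let queued = Arc::new(AtomicBool::new(false));
        let seen = queued.clone();

        let poller = thread::spawn(move || loop {
            if seen.load(Ordering::SeqCst) {
                break; // in the real test: confirm the queued request here
            }
            thread::sleep(Duration::from_millis(10)); // poll interval, was 100ms
        });

        queued.store(true, Ordering::SeqCst);
        poller.join().unwrap();
    }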
use std::collections::hash_map::Entry; -use util::{Address, H256, Bytes, U256, Uint}; +use util::{Address, H256, Bytes, U256}; use util::standard::*; use ethcore::error::{Error, CallError}; use ethcore::client::{MiningBlockChainClient, Executed, CallAnalytics}; diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index c446b1874..3cb0626a1 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -22,7 +22,7 @@ use rustc_serialize::hex::{FromHex, ToHex}; use time::get_time; use rlp; -use util::{Uint, U256, Address, H256, Mutex}; +use util::{U256, Address, H256, Mutex}; use ethkey::Secret; use ethcore::account_provider::AccountProvider; use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionId}; diff --git a/rpc/src/v1/tests/mocked/personal.rs b/rpc/src/v1/tests/mocked/personal.rs index 6618ea61c..8508eaacf 100644 --- a/rpc/src/v1/tests/mocked/personal.rs +++ b/rpc/src/v1/tests/mocked/personal.rs @@ -21,7 +21,7 @@ use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Action, Transaction}; use jsonrpc_core::IoHandler; -use util::{U256, Uint, Address}; +use util::{U256, Address}; use v1::{PersonalClient, Personal, Metadata}; use v1::helpers::dispatch::FullDispatcher; diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index e2e54b2e3..90efe12e1 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use std::str::FromStr; -use util::{U256, Uint, Address, ToPretty}; +use util::{U256, Address, ToPretty}; use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index 3e1f67154..b3322da46 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ -28,7 +28,7 @@ use v1::types::ConfirmationResponse; use v1::tests::helpers::TestMinerService; use v1::tests::mocked::parity; -use util::{Address, Uint, U256, ToPretty}; +use util::{Address, U256, ToPretty}; use ethkey::Secret; use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; @@ -420,7 +420,6 @@ fn should_add_decryption_to_the_queue() { }"#; let response = r#"{"jsonrpc":"2.0","result":"0x0102","id":1}"#; - // then let promise = tester.io.handle_request(&request); @@ -432,7 +431,7 @@ fn should_add_decryption_to_the_queue() { signer.request_confirmed(1.into(), Ok(ConfirmationResponse::Decrypt(vec![0x1, 0x2].into()))); break } - ::std::thread::sleep(Duration::from_millis(100)) + ::std::thread::sleep(Duration::from_millis(10)) }); // check response: will deadlock if unsuccessful. diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index f4f7de5e7..f91100421 100644 --- a/rpc/src/v1/types/trace.rs +++ b/rpc/src/v1/types/trace.rs @@ -23,7 +23,6 @@ use ethcore::state_diff; use ethcore::account_diff; use ethcore::executed; use ethcore::client::Executed; -use util::Uint; use v1::types::{Bytes, H160, H256, U256}; #[derive(Debug, Serialize)] diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index e61f0ab41..58e2bad4f 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -17,7 +17,7 @@ use std::str::FromStr; use std::fmt; use serde; -use util::{U256 as EthU256, U128 as EthU128, Uint}; +use util::{U256 as EthU256, U128 as EthU128}; macro_rules! 
impl_uint { ($name: ident, $other: ident, $size: expr) => { diff --git a/rpc_cli/Cargo.toml b/rpc_cli/Cargo.toml index 77fee345b..e8776f637 100644 --- a/rpc_cli/Cargo.toml +++ b/rpc_cli/Cargo.toml @@ -9,7 +9,7 @@ version = "1.4.0" [dependencies] futures = "0.1" rpassword = "0.3.0" -ethcore-bigint = "0.1.2" +bigint = "2.0" parity-rpc = { path = "../rpc" } parity-rpc-client = { path = "../rpc_client" } ethcore-util = { path = "../util" } diff --git a/rpc_cli/src/lib.rs b/rpc_cli/src/lib.rs index 23aa06f15..14bb5c706 100644 --- a/rpc_cli/src/lib.rs +++ b/rpc_cli/src/lib.rs @@ -2,7 +2,7 @@ extern crate futures; extern crate rpassword; extern crate ethcore_util as util; -extern crate ethcore_bigint as bigint; +extern crate bigint; extern crate parity_rpc as rpc; extern crate parity_rpc_client as client; diff --git a/scripts/contractABI.js b/scripts/contractABI.js index 98081123a..4b9e489a9 100644 --- a/scripts/contractABI.js +++ b/scripts/contractABI.js @@ -32,7 +32,6 @@ use std::string::String; use std::result::Result; use std::fmt; use {util, ethabi}; -use util::Uint; ${convertContract(name, json, prefs)} `; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 3869b9885..a4fe665e1 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -23,10 +23,10 @@ log = "0.3" env_logger = "0.4" time = "0.1.34" rand = "0.3.13" -heapsize = "0.3" +heapsize = "0.4" ethcore-ipc = { path = "../ipc/rpc" } semver = "0.6" -smallvec = { version = "0.3", features = ["heapsizeof"] } +smallvec = { version = "0.4", features = ["heapsizeof"] } ethcore-ipc-nano = { path = "../ipc/nano" } ethcore-devtools = { path = "../devtools" } ethkey = { path = "../ethkey" } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index f0f98d3dd..edf6f13d2 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -2194,7 +2194,7 @@ mod tests { use network::PeerId; use tests::helpers::*; use tests::snapshot::TestSnapshotService; - use util::{Uint, U256, Address, RwLock}; + use util::{U256, Address, RwLock}; use util::sha3::Hashable; use util::hash::H256; use util::bytes::Bytes; diff --git a/updater/src/operations.rs b/updater/src/operations.rs index 71112d3e7..d94459bef 100644 --- a/updater/src/operations.rs +++ b/updater/src/operations.rs @@ -4,7 +4,7 @@ use std::string::String; use std::result::Result; use std::fmt; use ethabi; -use util::{self, Uint}; +use util; pub struct Operations { contract: ethabi::Contract, diff --git a/util/Cargo.toml b/util/Cargo.toml index c5c585f83..5d5f5ab55 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -16,9 +16,9 @@ time = "0.1.34" rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" } eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } rust-crypto = "0.2.34" -elastic-array = "0.7.0" +elastic-array = "0.8" rlp = { path = "rlp" } -heapsize = { version = "0.3", features = ["unstable"] } +heapsize = "0.4" itertools = "0.5" sha3 = { path = "sha3" } clippy = { version = "0.0.103", optional = true} @@ -26,7 +26,7 @@ ethcore-devtools = { path = "../devtools" } libc = "0.2.7" vergen = "0.1" target_info = "0.1" -ethcore-bigint = "0.1.2" +ethcore-bigint = { path = "bigint" } parking_lot = "0.4" using_queue = { path = "using_queue" } table = { path = "table" } diff --git a/util/benches/bigint.rs b/util/benches/bigint.rs index 9966d7a92..59377c511 100644 --- a/util/benches/bigint.rs +++ b/util/benches/bigint.rs @@ -27,7 +27,7 @@ extern crate test; extern crate ethcore_util; use test::{Bencher, black_box}; -use ethcore_util::{U256, U512, Uint, U128}; +use ethcore_util::{U256, U512, 
U128}; #[bench] fn u256_add(b: &mut Bencher) { diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 1a5f029d0..c1225f72c 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -8,9 +8,9 @@ version = "0.1.3" authors = ["Parity Technologies "] [dependencies] -bigint = "1.0.4" +bigint = "2.0" rustc-serialize = "0.3" -heapsize = "0.3" +heapsize = "0.4" rand = "0.3.12" libc = "0.2" diff --git a/util/bigint/src/hash.rs b/util/bigint/src/hash.rs index 95d48bf8d..942ffc6fb 100644 --- a/util/bigint/src/hash.rs +++ b/util/bigint/src/hash.rs @@ -17,7 +17,7 @@ use std::str::FromStr; use rand::Rng; use rand::os::OsRng; use rustc_serialize::hex::{FromHex, FromHexError}; -use bigint::{Uint, U256}; +use bigint::U256; use libc::{c_void, memcmp}; /// Return `s` without the `0x` at the beginning of it, if any. diff --git a/util/rlp/Cargo.toml b/util/rlp/Cargo.toml index 31e077fe4..6ba4aafcd 100644 --- a/util/rlp/Cargo.toml +++ b/util/rlp/Cargo.toml @@ -7,8 +7,8 @@ version = "0.2.0" authors = ["Parity Technologies "] [dependencies] -elastic-array = "0.7.0" -ethcore-bigint = "0.1.3" +elastic-array = "0.8" +ethcore-bigint = { path = "../bigint" } lazy_static = "0.2" rustc-serialize = "0.3" byteorder = "1.0" diff --git a/util/rlp/benches/rlp.rs b/util/rlp/benches/rlp.rs index 6aeabaf5d..927b48583 100644 --- a/util/rlp/benches/rlp.rs +++ b/util/rlp/benches/rlp.rs @@ -15,7 +15,7 @@ #![feature(test)] extern crate test; -extern crate ethcore_bigint as bigint; +extern crate bigint; extern crate rlp; use test::Bencher; diff --git a/util/rlp/src/impls.rs b/util/rlp/src/impls.rs index 8c6a244b2..bc3caa495 100644 --- a/util/rlp/src/impls.rs +++ b/util/rlp/src/impls.rs @@ -8,7 +8,7 @@ use std::{cmp, mem, str}; use byteorder::{ByteOrder, BigEndian}; -use bigint::prelude::{Uint, U128, U256, H64, H128, H160, H256, H512, H520, H2048}; +use bigint::prelude::{U128, U256, H64, H128, H160, H256, H512, H520, H2048}; use traits::{Encodable, Decodable}; use stream::RlpStream; use {UntrustedRlp, DecoderError}; From bbbdd02a0060f8c48d3c6640f83db4a5605c9da1 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 24 May 2017 10:50:29 +0000 Subject: [PATCH 29/29] [ci skip] js-precompiled 20170524-104640 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88edf74e6..69d3715b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1825,7 +1825,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#c48daa735fc9267edfa2452bce3308fa63e1816a" +source = "git+https://github.com/paritytech/js-precompiled.git#d09c2b70b2e6c6e84d88999bfa6f3c8bb43b910e" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 0c4c6dfa9..987ba9c50 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.83", + "version": "1.7.84", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ",
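[Editor's note] Stepping back to the util/rlp hunks above (the last code changes before the auto-generated js-precompiled bump), impls.rs keeps its U256 Encodable and Decodable implementations and only drops the now-unneeded Uint import. A hedged round-trip sketch of those impls in use, assuming the in-tree rlp 0.2 stream API (RlpStream, UntrustedRlp::as_val) and the ethcore-bigint prelude types:

    extern crate rlp;
    extern crate ethcore_bigint as bigint;

    use bigint::prelude::U256;
    use rlp::{RlpStream, UntrustedRlp};

    fn main() {
        let value = U256::from(0x1234_5678u64);

        let mut stream = RlpStream::new();
        stream.append(&value);                   // Encodable impl from impls.rs
        let encoded = stream.out();

        let decoded: U256 = UntrustedRlp::new(&encoded)
            .as_val()                            // Decodable impl from impls.rs
            .expect("freshly encoded value decodes");
        assert_eq!(decoded, value);
    }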