From c1288810c64ebb5db644f29a5d07dccb3079f406 Mon Sep 17 00:00:00 2001
From: Fredrik Harrysson
Date: Thu, 19 Oct 2017 11:47:53 +0200
Subject: [PATCH 1/4] Change keypath derivation logic (#6815)

While the standard defined by Trezor as the default derivation path
(https://blog.trezor.io/trezor-integration-with-myetherwallet-3e217a652e08)
says it should be `m/44'/60'/0`, in practice Trezor does not ship an
Ethereum wallet of its own and refers customers to MEW. MEW implements its
own path derivation logic and generates multiple addresses by essentially
appending `/0`, `/1`, etc. to the path.

My initial Trezor implementation did not take this into account and simply
used the keypath Trezor itself recommends. However, since appending `/0`
for a "sub-address" is seemingly standard practice (and is what we already
do for Ledger), not following it was a mistake on my part.

Unfortunately, anyone who previously used their Trezor device with Parity
will now see a different address the next time they connect it. The only
ways to reach the old address are to use an old version of Parity, or to go
through MEW and select the Ledger keypath.

Also see #6811
---
 hw/src/trezor.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/src/trezor.rs b/hw/src/trezor.rs
index 239968120..a77d7233c 100644
--- a/hw/src/trezor.rs
+++ b/hw/src/trezor.rs
@@ -37,8 +37,8 @@ use trezor_sys::messages::{EthereumAddress, PinMatrixAck, MessageType, EthereumT
 
 const TREZOR_VID: u16 = 0x534c;
 const TREZOR_PIDS: [u16; 1] = [0x0001]; // Trezor v1, keeping this as an array to leave room for Trezor v2 which is in progress
-const ETH_DERIVATION_PATH: [u32; 4] = [0x8000002C, 0x8000003C, 0x80000000, 0]; // m/44'/60'/0'/0
-const ETC_DERIVATION_PATH: [u32; 4] = [0x8000002C, 0x8000003D, 0x80000000, 0]; // m/44'/61'/0'/0
+const ETH_DERIVATION_PATH: [u32; 5] = [0x8000002C, 0x8000003C, 0x80000000, 0, 0]; // m/44'/60'/0'/0/0
+const ETC_DERIVATION_PATH: [u32; 5] = [0x8000002C, 0x8000003D, 0x80000000, 0, 0]; // m/44'/61'/0'/0/0
 
 
 /// Hardware wallet error.
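For readers unfamiliar with BIP-44 keypaths, the sketch below (illustrative only, not part of the patch) shows how a path string such as `m/44'/60'/0'/0/0` maps onto the hardened-index constants in `ETH_DERIVATION_PATH`, and how a MEW-style "sub-address" comes from varying the final, non-hardened component. `parse_bip44` is a hypothetical helper for this note, not a Parity API.

```rust
// Illustrative sketch only -- not part of the patch above.
const HARDENED: u32 = 0x8000_0000;

/// Parse a BIP-44 style path ("m/44'/60'/0'/0/0") into the u32 components
/// passed to the device; an apostrophe marks a hardened index, i.e. the
/// 0x80000000 bit is set on that component.
fn parse_bip44(path: &str) -> Vec<u32> {
    path.trim_start_matches("m/")
        .split('/')
        .map(|component| {
            let index: u32 = component
                .trim_end_matches('\'')
                .parse()
                .expect("numeric path component");
            if component.ends_with('\'') { index | HARDENED } else { index }
        })
        .collect()
}

fn main() {
    // Matches ETH_DERIVATION_PATH after this patch: m/44'/60'/0'/0/0
    assert_eq!(
        parse_bip44("m/44'/60'/0'/0/0"),
        vec![0x8000002C, 0x8000003C, 0x80000000, 0, 0]
    );
    // A MEW-style "sub-address" just varies the final, non-hardened index.
    assert_eq!(parse_bip44("m/44'/60'/0'/0/1")[4], 1);
}
```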
From fdbf6bf7d6440766cd0f37794b03f14213e2a049 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Thu, 19 Oct 2017 14:18:21 +0200 Subject: [PATCH 2/4] Refresh cached tokens based on registry info & random balances (#6818) * Refresh cached tokens based on registry info & random balances * Don't display errored token images --- js/src/redux/providers/tokensActions.js | 15 +++------------ js/src/ui/TokenImage/tokenImage.js | 14 ++++++++++++-- js/src/util/tokens/index.js | 20 ++++++++++++++++++-- 3 files changed, 33 insertions(+), 16 deletions(-) diff --git a/js/src/redux/providers/tokensActions.js b/js/src/redux/providers/tokensActions.js index df1374dd7..2e1e8c052 100644 --- a/js/src/redux/providers/tokensActions.js +++ b/js/src/redux/providers/tokensActions.js @@ -71,7 +71,6 @@ function loadCachedTokens (tokenRegContract) { // Check if we have data from the right contract if (cached.tokenreg === tokenRegContract.address && cached.tokens) { log.debug('found cached tokens', cached.tokens); - dispatch(_setTokens(cached.tokens)); // Fetch all the tokens images on load // (it's the only thing that might have changed) @@ -105,22 +104,13 @@ export function loadTokens (options = {}) { }; } -export function loadTokensBasics (_tokenIndexes, options) { +export function loadTokensBasics (tokenIndexes, options) { const limit = 64; return (dispatch, getState) => { - const { api, tokens } = getState(); + const { api } = getState(); const { tokenReg } = Contracts.get(); const nextTokens = {}; - const prevTokensIndexes = Object.values(tokens).map((t) => t.index); - - // Only fetch tokens we don't have yet - const tokenIndexes = _tokenIndexes - .filter((tokenIndex) => { - return !prevTokensIndexes.includes(tokenIndex); - }) - .sort(); - const count = tokenIndexes.length; log.debug('loading basic tokens', tokenIndexes); @@ -240,6 +230,7 @@ function fetchTokensData (tokenRegContract, tokenIndexes) { log.debug('fetched', { fullResults, partialResults }); return [].concat(fullResults, partialResults) + .filter(({ address }) => !/0x0*$/.test(address)) .reduce((tokens, token) => { const { id, image, address } = token; diff --git a/js/src/ui/TokenImage/tokenImage.js b/js/src/ui/TokenImage/tokenImage.js index e0e66d22b..af7a80a02 100644 --- a/js/src/ui/TokenImage/tokenImage.js +++ b/js/src/ui/TokenImage/tokenImage.js @@ -32,14 +32,19 @@ class TokenImage extends Component { }).isRequired }; + state = { + error: false + }; + render () { + const { error } = this.state; const { api } = this.context; const { image, token } = this.props; const imageurl = token.image || image; let imagesrc = unknownImage; - if (imageurl) { + if (imageurl && !error) { const host = /^(\/)?api/.test(imageurl) ? 
api.dappsUrl : ''; @@ -49,11 +54,16 @@ class TokenImage extends Component { return ( { ); } + + handleError = () => { + this.setState({ error: true }); + }; } function mapStateToProps (iniState) { diff --git a/js/src/util/tokens/index.js b/js/src/util/tokens/index.js index 810f16777..11ad0f903 100644 --- a/js/src/util/tokens/index.js +++ b/js/src/util/tokens/index.js @@ -55,9 +55,14 @@ export function fetchTokensBasics (api, tokenReg, start = 0, limit = 100) { return api.eth .call({ data: tokenAddressesBytcode + tokenAddressesCallData }) .then((result) => { - const tokenAddresses = decodeArray(api, 'address[]', result); - + return decodeArray(api, 'address[]', result); + }) + .then((tokenAddresses) => { return tokenAddresses.map((tokenAddress, index) => { + if (/^0x0*$/.test(tokenAddress)) { + return null; + } + const tokenIndex = start + index; return { @@ -68,6 +73,17 @@ export function fetchTokensBasics (api, tokenReg, start = 0, limit = 100) { fetched: false }; }); + }) + .then((tokens) => tokens.filter((token) => token)) + .then((tokens) => { + const randomAddress = sha3(`${Date.now()}`).substr(0, 42); + + return fetchTokensBalances(api, tokens, [randomAddress]) + .then((_balances) => { + const balances = _balances[randomAddress]; + + return tokens.filter(({ id }) => balances[id].eq(0)); + }); }); } From b4c4fddb10b5d047db8fde2bbada8ac01e6366ba Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 19 Oct 2017 14:41:11 +0200 Subject: [PATCH 3/4] devp2p snappy compression (#6683) --- Cargo.lock | 1 + util/network/Cargo.toml | 1 + util/network/src/connection.rs | 5 +- util/network/src/error.rs | 10 ++++ util/network/src/host.rs | 4 +- util/network/src/lib.rs | 3 +- util/network/src/session.rs | 93 +++++++++++++++++++++------------- 7 files changed, 76 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2eb248e71..e826ea22b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -700,6 +700,7 @@ dependencies = [ "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "snappy 0.1.0", "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 0960715bf..e989fb599 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -33,6 +33,7 @@ path = { path = "../path" } ethcore-logger = { path ="../../logger" } ipnetwork = "0.12.6" hash = { path = "../hash" } +snappy = { path = "../snappy" } serde_json = "1.0" [features] diff --git a/util/network/src/connection.rs b/util/network/src/connection.rs index fd61b4b38..726952648 100644 --- a/util/network/src/connection.rs +++ b/util/network/src/connection.rs @@ -40,6 +40,7 @@ use crypto; const ENCRYPTED_HEADER_LEN: usize = 32; const RECIEVE_PAYLOAD_TIMEOUT: u64 = 30000; +pub const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1; pub trait GenericSocket : Read + Write { } @@ -345,7 +346,7 @@ impl EncryptedConnection { ingress_mac: ingress_mac, read_state: EncryptedConnectionState::Header, protocol_id: 0, - payload_len: 0 + payload_len: 0, }; enc.connection.expect(ENCRYPTED_HEADER_LEN); Ok(enc) @@ -355,7 +356,7 @@ impl EncryptedConnection { pub fn send_packet(&mut self, io: &IoContext, payload: &[u8]) -> Result<(), NetworkError> where Message: Send + Clone + Sync + 'static { let mut header = RlpStream::new(); let len 
= payload.len(); - if len >= (1 << 24) { + if len > MAX_PAYLOAD_SIZE { return Err(NetworkError::OversizedPacket); } header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1); diff --git a/util/network/src/error.rs b/util/network/src/error.rs index 54773d573..96fc1ff23 100644 --- a/util/network/src/error.rs +++ b/util/network/src/error.rs @@ -19,6 +19,7 @@ use rlp::*; use std::fmt; use ethkey::Error as KeyError; use crypto::Error as CryptoError; +use snappy; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DisconnectReason @@ -107,6 +108,8 @@ pub enum NetworkError { StdIo(::std::io::Error), /// Packet size is over the protocol limit. OversizedPacket, + /// Decompression error. + Decompression(snappy::InvalidInput), } impl fmt::Display for NetworkError { @@ -126,6 +129,7 @@ impl fmt::Display for NetworkError { StdIo(ref err) => format!("{}", err), InvalidNodeId => "Invalid node id".into(), OversizedPacket => "Packet is too large".into(), + Decompression(ref err) => format!("Error decompressing packet: {}", err), }; f.write_fmt(format_args!("Network error ({})", msg)) @@ -162,6 +166,12 @@ impl From for NetworkError { } } +impl From for NetworkError { + fn from(err: snappy::InvalidInput) -> NetworkError { + NetworkError::Decompression(err) + } +} + impl From<::std::net::AddrParseError> for NetworkError { fn from(err: ::std::net::AddrParseError) -> NetworkError { NetworkError::AddressParse(err) diff --git a/util/network/src/host.rs b/util/network/src/host.rs index 13e5f74a3..3d21bf5fe 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -256,7 +256,7 @@ impl<'s> NetworkContext<'s> { pub fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), NetworkError> { let session = self.resolve_session(peer); if let Some(session) = session { - session.lock().send_packet(self.io, protocol, packet_id as u8, &data)?; + session.lock().send_packet(self.io, Some(protocol), packet_id as u8, &data)?; } else { trace!(target: "network", "Send: Peer no longer exist") } @@ -938,7 +938,7 @@ impl Host { for (p, packet_id, data) in packet_data { let reserved = self.reserved_nodes.read(); if let Some(h) = handlers.get(&p).clone() { - h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]); + h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data); } } } diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index 6257118b6..8f2ccb8d4 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -81,6 +81,7 @@ extern crate ethcore_logger; extern crate ipnetwork; extern crate hash; extern crate serde_json; +extern crate snappy; #[macro_use] extern crate log; @@ -115,7 +116,7 @@ pub use node_table::{is_valid_node_url, NodeId}; use ipnetwork::{IpNetwork, IpNetworkError}; use std::str::FromStr; -const PROTOCOL_VERSION: u32 = 4; +const PROTOCOL_VERSION: u32 = 5; /// Network IO protocol handler. This needs to be implemented for each new subprotocol. /// All the handler function are called from within IO event loop. 
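Before the `session.rs` diff below: the compression path it adds can be summarised by the following standalone sketch (illustrative, not part of the patch). It assumes the in-tree `snappy` bindings whose calls appear in the diff (`compress_into`, `decompressed_len`, `decompress`) and a simplified error enum standing in for `NetworkError`; the real code additionally prefixes the payload with the RLP-encoded packet id and routes it through the encrypted framing in `connection.rs`.

```rust
// Standalone sketch of the compression scheme in this patch; illustrative only.
// Assumed bindings, mirroring the calls in the diff:
//   snappy::compress_into(&[u8], &mut Vec<u8>) -> usize
//   snappy::decompressed_len(&[u8]) -> Result<usize, snappy::InvalidInput>
//   snappy::decompress(&[u8]) -> Result<Vec<u8>, snappy::InvalidInput>
extern crate snappy;

const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1; // the frame header carries a 3-byte length
const MIN_COMPRESSION_PROTOCOL_VERSION: u32 = 5; // compress only once both peers speak p2p v5

enum Error {
    OversizedPacket,
    Decompression(snappy::InvalidInput),
}

/// Outgoing path: compress the payload when the negotiated p2p version allows it.
fn encode_payload(payload: &[u8], peer_protocol: u32) -> Result<Vec<u8>, Error> {
    if payload.len() > MAX_PAYLOAD_SIZE {
        return Err(Error::OversizedPacket);
    }
    if peer_protocol < MIN_COMPRESSION_PROTOCOL_VERSION {
        return Ok(payload.to_vec());
    }
    let mut compressed = Vec::new();
    let len = snappy::compress_into(payload, &mut compressed);
    compressed.truncate(len);
    Ok(compressed)
}

/// Incoming path: check the advertised decompressed size *before* inflating,
/// so a peer cannot make us allocate more than one frame may legally hold.
fn decode_payload(data: &[u8], peer_protocol: u32) -> Result<Vec<u8>, Error> {
    if peer_protocol < MIN_COMPRESSION_PROTOCOL_VERSION {
        return Ok(data.to_vec());
    }
    if snappy::decompressed_len(data).map_err(Error::Decompression)? > MAX_PAYLOAD_SIZE {
        return Err(Error::OversizedPacket);
    }
    snappy::decompress(data).map_err(Error::Decompression)
}
```

Checking `decompressed_len` first mirrors the guard added to `readable()` below, so the 16 MiB frame limit stays authoritative on both the sending and receiving side.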
diff --git a/util/network/src/session.rs b/util/network/src/session.rs index 992081237..cf6c196e3 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -25,7 +25,7 @@ use mio::deprecated::{Handler, EventLoop}; use mio::tcp::*; use bigint::hash::*; use rlp::*; -use connection::{EncryptedConnection, Packet, Connection}; +use connection::{EncryptedConnection, Packet, Connection, MAX_PAYLOAD_SIZE}; use handshake::Handshake; use io::{IoContext, StreamToken}; use error::{NetworkError, DisconnectReason}; @@ -33,10 +33,13 @@ use host::*; use node_table::NodeId; use stats::NetworkStats; use time; +use snappy; // Timeout must be less than (interval - 1). const PING_TIMEOUT_SEC: u64 = 60; const PING_INTERVAL_SEC: u64 = 120; +const MIN_PROTOCOL_VERSION: u32 = 4; +const MIN_COMPRESSION_PROTOCOL_VERSION: u32 = 5; #[derive(Debug, Clone)] enum ProtocolState { @@ -61,6 +64,7 @@ pub struct Session { state: State, // Protocol states -- accumulates pending packets until signaled as ready. protocol_states: HashMap, + compression: bool, } enum State { @@ -198,6 +202,7 @@ impl Session { pong_time_ns: None, expired: false, protocol_states: HashMap::new(), + compression: false, }) } @@ -211,7 +216,6 @@ impl Session { }; self.state = State::Session(connection); self.write_hello(io, host)?; - self.send_ping(io)?; Ok(()) } @@ -326,28 +330,43 @@ impl Session { } /// Send a protocol packet to peer. - pub fn send_packet(&mut self, io: &IoContext, protocol: [u8; 3], packet_id: u8, data: &[u8]) -> Result<(), NetworkError> + pub fn send_packet(&mut self, io: &IoContext, protocol: Option<[u8; 3]>, packet_id: u8, data: &[u8]) -> Result<(), NetworkError> where Message: Send + Sync + Clone { - if self.info.capabilities.is_empty() || !self.had_hello { - debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), str::from_utf8(&protocol[..]).unwrap_or("??"), packet_id); + if protocol.is_some() && (self.info.capabilities.is_empty() || !self.had_hello) { + debug!(target: "network", "Sending to unconfirmed session {}, protocol: {:?}, packet: {}", self.token(), protocol.as_ref().map(|p| str::from_utf8(&p[..]).unwrap_or("??")), packet_id); return Err(From::from(NetworkError::BadProtocol)); } if self.expired() { return Err(From::from(NetworkError::Expired)); } let mut i = 0usize; - while protocol != self.info.capabilities[i].protocol { - i += 1; - if i == self.info.capabilities.len() { - debug!(target: "network", "Unknown protocol: {:?}", protocol); - return Ok(()) - } - } - let pid = self.info.capabilities[i].id_offset + packet_id; + let pid = match protocol { + Some(protocol) => { + while protocol != self.info.capabilities[i].protocol { + i += 1; + if i == self.info.capabilities.len() { + debug!(target: "network", "Unknown protocol: {:?}", protocol); + return Ok(()) + } + } + self.info.capabilities[i].id_offset + packet_id + }, + None => packet_id + }; let mut rlp = RlpStream::new(); rlp.append(&(pid as u32)); - rlp.append_raw(data, 1); - self.send(io, rlp) + let mut compressed = Vec::new(); + let mut payload = data; // create a reference with local lifetime + if self.compression { + if payload.len() > MAX_PAYLOAD_SIZE { + return Err(NetworkError::OversizedPacket); + } + let len = snappy::compress_into(&payload, &mut compressed); + trace!(target: "network", "compressed {} to {}", payload.len(), len); + payload = &compressed[0..len]; + } + rlp.append_raw(payload, 1); + self.send(io, &rlp.drain()) } /// Keep this session alive. 
Returns false if ping timeout happened @@ -396,14 +415,23 @@ impl Session { if packet_id != PACKET_HELLO && packet_id != PACKET_DISCONNECT && !self.had_hello { return Err(From::from(NetworkError::BadProtocol)); } + let data = if self.compression { + let compressed = &packet.data[1..]; + if snappy::decompressed_len(&compressed)? > MAX_PAYLOAD_SIZE { + return Err(NetworkError::OversizedPacket); + } + snappy::decompress(&compressed)? + } else { + packet.data[1..].to_owned() + }; match packet_id { PACKET_HELLO => { - let rlp = UntrustedRlp::new(&packet.data[1..]); //TODO: validate rlp expected size + let rlp = UntrustedRlp::new(&data); //TODO: validate rlp expected size self.read_hello(io, &rlp, host)?; Ok(SessionData::Ready) }, PACKET_DISCONNECT => { - let rlp = UntrustedRlp::new(&packet.data[1..]); + let rlp = UntrustedRlp::new(&data); let reason: u8 = rlp.val_at(0)?; if self.had_hello { debug!(target:"network", "Disconnected: {}: {:?}", self.token(), DisconnectReason::from_u8(reason)); @@ -439,11 +467,11 @@ impl Session { match *self.protocol_states.entry(protocol).or_insert_with(|| ProtocolState::Pending(Vec::new())) { ProtocolState::Connected => { trace!(target: "network", "Packet {} mapped to {:?}:{}, i={}, capabilities={:?}", packet_id, protocol, protocol_packet_id, i, self.info.capabilities); - Ok(SessionData::Packet { data: packet.data, protocol: protocol, packet_id: protocol_packet_id } ) + Ok(SessionData::Packet { data: data, protocol: protocol, packet_id: protocol_packet_id } ) } ProtocolState::Pending(ref mut pending) => { trace!(target: "network", "Packet {} deferred until protocol connection event completion", packet_id); - pending.push((packet.data, protocol_packet_id)); + pending.push((data, protocol_packet_id)); Ok(SessionData::Continue) } @@ -465,7 +493,7 @@ impl Session { .append_list(&host.capabilities) .append(&host.local_endpoint.address.port()) .append(host.id()); - self.send(io, rlp) + self.send(io, &rlp.drain()) } fn read_hello(&mut self, io: &IoContext, rlp: &UntrustedRlp, host: &HostInfo) -> Result<(), NetworkError> @@ -494,8 +522,7 @@ impl Session { while i < caps.len() { if caps.iter().any(|c| c.protocol == caps[i].protocol && c.version > caps[i].version) { caps.remove(i); - } - else { + } else { i += 1; } } @@ -520,52 +547,46 @@ impl Session { trace!(target: "network", "No common capabilities with peer."); return Err(From::from(self.disconnect(io, DisconnectReason::UselessPeer))); } - if protocol != host.protocol_version { + if protocol < MIN_PROTOCOL_VERSION { trace!(target: "network", "Peer protocol version mismatch: {}", protocol); return Err(From::from(self.disconnect(io, DisconnectReason::UselessPeer))); } + self.compression = protocol >= MIN_COMPRESSION_PROTOCOL_VERSION; + self.send_ping(io)?; self.had_hello = true; Ok(()) } /// Senf ping packet pub fn send_ping(&mut self, io: &IoContext) -> Result<(), NetworkError> where Message: Send + Sync + Clone { - self.send(io, Session::prepare(PACKET_PING)?)?; + self.send_packet(io, None, PACKET_PING, &EMPTY_LIST_RLP)?; self.ping_time_ns = time::precise_time_ns(); self.pong_time_ns = None; Ok(()) } fn send_pong(&mut self, io: &IoContext) -> Result<(), NetworkError> where Message: Send + Sync + Clone { - self.send(io, Session::prepare(PACKET_PONG)?) 
+ self.send_packet(io, None, PACKET_PONG, &EMPTY_LIST_RLP) } /// Disconnect this session pub fn disconnect(&mut self, io: &IoContext, reason: DisconnectReason) -> NetworkError where Message: Send + Sync + Clone { if let State::Session(_) = self.state { let mut rlp = RlpStream::new(); - rlp.append(&(PACKET_DISCONNECT as u32)); rlp.begin_list(1); rlp.append(&(reason as u32)); - self.send(io, rlp).ok(); + self.send_packet(io, None, PACKET_DISCONNECT, &rlp.drain()).ok(); } NetworkError::Disconnect(reason) } - fn prepare(packet_id: u8) -> Result { - let mut rlp = RlpStream::new(); - rlp.append(&(packet_id as u32)); - rlp.begin_list(0); - Ok(rlp) - } - - fn send(&mut self, io: &IoContext, rlp: RlpStream) -> Result<(), NetworkError> where Message: Send + Sync + Clone { + fn send(&mut self, io: &IoContext, data: &[u8]) -> Result<(), NetworkError> where Message: Send + Sync + Clone { match self.state { State::Handshake(_) => { warn!(target:"network", "Unexpected send request"); }, State::Session(ref mut s) => { - s.send_packet(io, &rlp.out())? + s.send_packet(io, data)? }, } Ok(()) From 58db82dbe2f4908b172cad16063527443225068c Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Thu, 19 Oct 2017 13:08:11 +0000 Subject: [PATCH 4/4] [ci skip] js-precompiled 20171019-130316 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e826ea22b..327e85c84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2249,7 +2249,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#fe0b4dbdfe6e7ebebb247d565d937fd0a0feca5f" +source = "git+https://github.com/paritytech/js-precompiled.git#29360e67331334a9ec3aafdb3725d8f7d8b5d2a1" dependencies = [ "parity-dapps-glue 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package-lock.json b/js/package-lock.json index f30dde364..bc8565dd1 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.38", + "version": "1.8.39", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index ed94c6192..feadd08d8 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.38", + "version": "1.8.39", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ",
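Closing note on PATCH 3/4 (illustrative, not part of any patch): `MAX_PAYLOAD_SIZE` is `(1 << 24) - 1` because the devp2p frame header encodes the payload length in three big-endian bytes, exactly as `send_packet` in `connection.rs` does with `[(len >> 16) as u8, (len >> 8) as u8, len as u8]`. A tiny self-contained check of that bound, with a hypothetical `frame_len` helper:

```rust
// Illustrative only: the 3-byte, big-endian payload length used by devp2p framing.
const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1;

fn frame_len(len: usize) -> Option<[u8; 3]> {
    if len > MAX_PAYLOAD_SIZE {
        return None; // connection.rs returns NetworkError::OversizedPacket here
    }
    Some([(len >> 16) as u8, (len >> 8) as u8, len as u8])
}

fn main() {
    assert_eq!(frame_len(MAX_PAYLOAD_SIZE), Some([0xff, 0xff, 0xff]));
    assert_eq!(frame_len(MAX_PAYLOAD_SIZE + 1), None);
}
```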