diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d27b58f9a..84d10e1bc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -422,14 +422,10 @@ test-rust-stable: image: ethcore/rust:stable before_script: - git submodule update --init --recursive - - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v ^js/ | wc -l) - - export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l) - - echo "rust/js modified: $RUST_FILES_MODIFIED / $JS_FILES_MODIFIED" - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi + - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) script: - export RUST_BACKTRACE=1 - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS lint since no JS files modified."; else ./js/scripts/lint.sh && ./js/scripts/test.sh && ./js/scripts/build.sh; fi - - if [ "$RUST_FILES_MODIFIED" = 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi + - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi tags: - rust - rust-stable @@ -439,12 +435,9 @@ js-test: before_script: - git submodule update --init --recursive - export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l) - - echo $JS_FILES_MODIFIED - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi + - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi script: - - export RUST_BACKTRACE=1 - - echo $JS_FILES_MODIFIED - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS lint since no JS files modified."; else ./js/scripts/lint.sh && ./js/scripts/test.sh && ./js/scripts/build.sh; fi + - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS lint since no JS files modified."; else ./js/scripts/lint.sh && ./js/scripts/test.sh && ./js/scripts/build.sh; fi tags: - rust - rust-stable @@ -487,9 +480,9 @@ js-release: before_script: - export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l) - echo $JS_FILES_MODIFIED - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi + - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi script: - echo $JS_FILES_MODIFIED - - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "Skipping JS rebuild since no JS files modified."; else ./js/scripts/build.sh && ./js/scripts/release.sh; fi + - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS rebuild since no JS files modified."; else ./js/scripts/build.sh && ./js/scripts/release.sh; fi tags: - javascript diff --git a/Cargo.lock b/Cargo.lock index 4ec3404ae..957bfb4ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,6 +18,7 @@ dependencies = [ "ethcore-ipc-hypervisor 1.2.0", "ethcore-ipc-nano 1.5.0", "ethcore-ipc-tests 0.1.0", + "ethcore-light 1.5.0", "ethcore-logger 1.5.0", "ethcore-rpc 1.5.0", "ethcore-signer 1.5.0", @@ -458,6 +459,21 @@ dependencies = [ "semver 
0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethcore-light" +version = "1.5.0" +dependencies = [ + "ethcore 1.5.0", + "ethcore-io 1.5.0", + "ethcore-ipc 1.5.0", + "ethcore-ipc-codegen 1.5.0", + "ethcore-network 1.5.0", + "ethcore-util 1.5.0", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.1.0", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ethcore-logger" version = "1.5.0" @@ -667,6 +683,7 @@ dependencies = [ "ethcore-ipc 1.5.0", "ethcore-ipc-codegen 1.5.0", "ethcore-ipc-nano 1.5.0", + "ethcore-light 1.5.0", "ethcore-network 1.5.0", "ethcore-util 1.5.0", "ethkey 0.2.0", @@ -1315,7 +1332,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#3d3b2f9e8e8b0fd62c172240bfd001a317cf2979" +source = "git+https://github.com/ethcore/js-precompiled.git#4465ba44b8f698640f2355508245f7f9e370343b" dependencies = [ "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index 85e55ed21..f19cad512 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,6 +49,7 @@ ethcore-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} rpc-cli = { path = "rpc_cli" } parity-rpc-client = { path = "rpc_client" } +ethcore-light = { path = "ethcore/light" } [target.'cfg(windows)'.dependencies] winapi = "0.2" diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 74400d7ab..c89bbc74f 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -8,14 +8,18 @@ authors = ["Ethcore "] build = "build.rs" [build-dependencies] -"ethcore-ipc-codegen" = { path = "../../ipc/codegen" } +"ethcore-ipc-codegen" = { path = "../../ipc/codegen", optional = true } [dependencies] log = "0.3" -ethcore = { path = ".." } +ethcore = { path = ".."} ethcore-util = { path = "../../util" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } -ethcore-ipc = { path = "../../ipc/rpc" } +ethcore-ipc = { path = "../../ipc/rpc", optional = true } rlp = { path = "../../util/rlp" } -time = "0.1" \ No newline at end of file +time = "0.1" + +[features] +default = [] +ipc = ["ethcore-ipc", "ethcore-ipc-codegen"] \ No newline at end of file diff --git a/ethcore/light/build.rs b/ethcore/light/build.rs index cff92a011..7d4e0064c 100644 --- a/ethcore/light/build.rs +++ b/ethcore/light/build.rs @@ -14,8 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+#[cfg(feature = "ipc")] extern crate ethcore_ipc_codegen; +#[cfg(feature = "ipc")] fn main() { ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap(); + ethcore_ipc_codegen::derive_ipc_cond("src/provider.rs", true).unwrap(); } + +#[cfg(not(feature = "ipc"))] +fn main() { } \ No newline at end of file diff --git a/ethcore/light/src/client.rs b/ethcore/light/src/client.rs index 8a5c43c48..0035406dc 100644 --- a/ethcore/light/src/client.rs +++ b/ethcore/light/src/client.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::engines::Engine; -use ethcore::ids::BlockID; +use ethcore::ids::BlockId; use ethcore::service::ClientIoMessage; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; @@ -51,7 +51,7 @@ impl Client { } /// Whether the block is already known (but not necessarily part of the canonical chain) - pub fn is_known(&self, _id: BlockID) -> bool { + pub fn is_known(&self, _id: BlockId) -> bool { false } @@ -61,7 +61,7 @@ impl Client { } /// Inquire about the status of a given block. - pub fn status(&self, _id: BlockID) -> BlockStatus { + pub fn status(&self, _id: BlockId) -> BlockStatus { BlockStatus::Unknown } diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index e150f4ee5..7fa2f5911 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -33,8 +33,21 @@ pub mod client; pub mod net; + +#[cfg(not(feature = "ipc"))] pub mod provider; +#[cfg(feature = "ipc")] +pub mod provider { + #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues + include!(concat!(env!("OUT_DIR"), "/provider.rs")); +} + +#[cfg(feature = "ipc")] +pub mod remote { + pub use provider::LightProviderClient; +} + mod types; pub use self::provider::Provider; @@ -47,6 +60,8 @@ extern crate ethcore; extern crate ethcore_util as util; extern crate ethcore_network as network; extern crate ethcore_io as io; -extern crate ethcore_ipc as ipc; extern crate rlp; -extern crate time; \ No newline at end of file +extern crate time; + +#[cfg(feature = "ipc")] +extern crate ethcore_ipc as ipc; \ No newline at end of file diff --git a/ethcore/light/src/net/buffer_flow.rs b/ethcore/light/src/net/buffer_flow.rs index 6730c71a7..2371c6ea4 100644 --- a/ethcore/light/src/net/buffer_flow.rs +++ b/ethcore/light/src/net/buffer_flow.rs @@ -22,6 +22,9 @@ //! //! This module provides an interface for configuration of buffer //! flow costs and recharge rates. +//! +//! Current default costs are picked completely arbitrarily, not based +//! on any empirical timings or mathematical models. use request; use super::packet; @@ -273,6 +276,16 @@ impl FlowParams { } } +impl Default for FlowParams { + fn default() -> Self { + FlowParams { + limit: 50_000_000.into(), + costs: CostTable::default(), + recharge: 100_000.into(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs new file mode 100644 index 000000000..c05e69b0f --- /dev/null +++ b/ethcore/light/src/net/context.rs @@ -0,0 +1,120 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! I/O and event context generalizations. + +use network::{NetworkContext, PeerId}; + +use super::{Announcement, LightProtocol, ReqId}; +use super::error::Error; +use request::Request; + +/// An I/O context which allows sending and receiving packets as well as +/// disconnecting peers. This is used as a generalization of the portions +/// of a p2p network which the light protocol structure makes use of. +pub trait IoContext { + /// Send a packet to a specific peer. + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec); + + /// Respond to a peer's message. Only works if this context is a byproduct + /// of a packet handler. + fn respond(&self, packet_id: u8, packet_body: Vec); + + /// Disconnect a peer. + fn disconnect_peer(&self, peer: PeerId); + + /// Disable a peer -- this is a disconnect + a time-out. + fn disable_peer(&self, peer: PeerId); + + /// Get a peer's protocol version. + fn protocol_version(&self, peer: PeerId) -> Option; +} + +impl<'a> IoContext for NetworkContext<'a> { + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { + if let Err(e) = self.send(peer, packet_id, packet_body) { + debug!(target: "les", "Error sending packet to peer {}: {}", peer, e); + } + } + + fn respond(&self, packet_id: u8, packet_body: Vec) { + if let Err(e) = self.respond(packet_id, packet_body) { + debug!(target: "les", "Error responding to peer message: {}", e); + } + } + + fn disconnect_peer(&self, peer: PeerId) { + NetworkContext::disconnect_peer(self, peer); + } + + fn disable_peer(&self, peer: PeerId) { + NetworkContext::disable_peer(self, peer); + } + + fn protocol_version(&self, peer: PeerId) -> Option { + self.protocol_version(self.subprotocol_name(), peer) + } +} + +/// Context for a protocol event. +pub trait EventContext { + /// Get the peer relevant to the event e.g. message sender, + /// disconnected/connected peer. + fn peer(&self) -> PeerId; + + /// Make a request from a peer. + fn request_from(&self, peer: PeerId, request: Request) -> Result; + + /// Make an announcement of new capabilities to the rest of the peers. + // TODO: maybe just put this on a timer in LightProtocol? + fn make_announcement(&self, announcement: Announcement); + + /// Disconnect a peer. + fn disconnect_peer(&self, peer: PeerId); + + /// Disable a peer. + fn disable_peer(&self, peer: PeerId); +} + +/// Concrete implementation of `EventContext` over the light protocol struct and +/// an io context. +pub struct Ctx<'a> { + /// Io context to enable immediate response to events. + pub io: &'a IoContext, + /// Protocol implementation. + pub proto: &'a LightProtocol, + /// Relevant peer for event. 
+ pub peer: PeerId, +} + +impl<'a> EventContext for Ctx<'a> { + fn peer(&self) -> PeerId { self.peer } + fn request_from(&self, peer: PeerId, request: Request) -> Result { + self.proto.request_from(self.io, &peer, request) + } + + fn make_announcement(&self, announcement: Announcement) { + self.proto.make_announcement(self.io, announcement); + } + + fn disconnect_peer(&self, peer: PeerId) { + self.io.disconnect_peer(peer); + } + + fn disable_peer(&self, peer: PeerId) { + self.io.disable_peer(peer); + } +} \ No newline at end of file diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index 0855cdeb8..01a15928b 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -54,6 +54,14 @@ pub enum Error { WrongNetwork, /// Unknown peer. UnknownPeer, + /// Unsolicited response. + UnsolicitedResponse, + /// Not a server. + NotServer, + /// Unsupported protocol version. + UnsupportedProtocolVersion(u8), + /// Bad protocol version. + BadProtocolVersion, } impl Error { @@ -67,6 +75,10 @@ impl Error { Error::UnexpectedHandshake => Punishment::Disconnect, Error::WrongNetwork => Punishment::Disable, Error::UnknownPeer => Punishment::Disconnect, + Error::UnsolicitedResponse => Punishment::Disable, + Error::NotServer => Punishment::Disable, + Error::UnsupportedProtocolVersion(_) => Punishment::Disable, + Error::BadProtocolVersion => Punishment::Disable, } } } @@ -92,7 +104,11 @@ impl fmt::Display for Error { Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), Error::WrongNetwork => write!(f, "Wrong network"), - Error::UnknownPeer => write!(f, "unknown peer"), + Error::UnknownPeer => write!(f, "Unknown peer"), + Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), + Error::NotServer => write!(f, "Peer not a server."), + Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), + Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), } } } \ No newline at end of file diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index a1b3b30b0..481740a48 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -20,36 +20,51 @@ //! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) use ethcore::transaction::SignedTransaction; +use ethcore::receipt::Receipt; + use io::TimerToken; -use network::{NetworkProtocolHandler, NetworkContext, NetworkError, PeerId}; +use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, Stream, UntrustedRlp, View}; use util::hash::H256; -use util::{Mutex, RwLock, U256}; -use time::SteadyTime; +use util::{Bytes, Mutex, RwLock, U256}; +use time::{Duration, SteadyTime}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; +use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; use request::{self, Request}; use self::buffer_flow::{Buffer, FlowParams}; +use self::context::Ctx; use self::error::{Error, Punishment}; mod buffer_flow; +mod context; mod error; mod status; -pub use self::status::{Status, Capabilities, Announcement, NetworkId}; +#[cfg(test)] +mod tests; + +pub use self::context::{EventContext, IoContext}; +pub use self::status::{Status, Capabilities, Announcement}; const TIMEOUT: TimerToken = 0; const TIMEOUT_INTERVAL_MS: u64 = 1000; -// LPV1 -const PROTOCOL_VERSION: u32 = 1; +// minimum interval between updates. 
+const UPDATE_INTERVAL_MS: i64 = 5000; -// TODO [rob] make configurable. -const PROTOCOL_ID: [u8; 3] = *b"les"; +// Supported protocol versions. +pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; + +// Max protocol version. +pub const MAX_PROTOCOL_VERSION: u8 = 1; + +// Packet count for LES. +pub const PACKET_COUNT: u8 = 15; // packet ID definitions. mod packet { @@ -95,17 +110,19 @@ pub struct ReqId(usize); // may not have received one for. struct PendingPeer { sent_head: H256, + last_update: SteadyTime, + proto_version: u8, } // data about each peer. struct Peer { local_buffer: Buffer, // their buffer relative to us - remote_buffer: Buffer, // our buffer relative to them - current_asking: HashSet, // pending request ids. status: Status, capabilities: Capabilities, - remote_flow: FlowParams, + remote_flow: Option<(Buffer, FlowParams)>, sent_head: H256, // last head we've given them. + last_update: SteadyTime, + proto_version: u8, } impl Peer { @@ -126,38 +143,56 @@ impl Peer { self.local_buffer.current() } - - // recharge remote buffer with remote flow params. - fn recharge_remote(&mut self) { - let flow = &mut self.remote_flow; - flow.recharge(&mut self.remote_buffer); - } } /// An LES event handler. +/// +/// Each handler function takes a context which describes the relevant peer +/// and gives references to the IO layer and protocol structure so new messages +/// can be dispatched immediately. +/// +/// Request responses are not guaranteed to be complete or valid, but passed IDs will be correct. +/// Response handlers are not given a copy of the original request; it is assumed +/// that relevant data will be stored by interested handlers. pub trait Handler: Send + Sync { /// Called when a peer connects. - fn on_connect(&self, _id: PeerId, _status: &Status, _capabilities: &Capabilities) { } - /// Called when a peer disconnects - fn on_disconnect(&self, _id: PeerId) { } + fn on_connect(&self, _ctx: &EventContext, _status: &Status, _capabilities: &Capabilities) { } + /// Called when a peer disconnects, with a list of unfulfilled request IDs as + /// of yet. + fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) { } /// Called when a peer makes an announcement. - fn on_announcement(&self, _id: PeerId, _announcement: &Announcement) { } + fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } /// Called when a peer requests relay of some transactions. - fn on_transactions(&self, _id: PeerId, _relay: &[SignedTransaction]) { } + fn on_transactions(&self, _ctx: &EventContext, _relay: &[SignedTransaction]) { } + /// Called when a peer responds with block bodies. + fn on_block_bodies(&self, _ctx: &EventContext, _req_id: ReqId, _bodies: &[Bytes]) { } + /// Called when a peer responds with block headers. + fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, _headers: &[Bytes]) { } + /// Called when a peer responds with block receipts. + fn on_receipts(&self, _ctx: &EventContext, _req_id: ReqId, _receipts: &[Vec]) { } + /// Called when a peer responds with state proofs. Each proof is a series of trie + /// nodes in ascending order by distance from the root. + fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec]) { } + /// Called when a peer responds with contract code. + fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { } + /// Called when a peer responds with header proofs. Each proof is a block header coupled + /// with a series of trie nodes is ascending order by distance from the root. 
+ fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called on abort. + fn on_abort(&self) { } } -// a request and the time it was made. +// a request, the peer who it was made to, and the time it was made. struct Requested { request: Request, timestamp: SteadyTime, + peer_id: PeerId, } /// Protocol parameters. pub struct Params { - /// Genesis hash. - pub genesis_hash: H256, /// Network id. - pub network_id: NetworkId, + pub network_id: u64, /// Buffer flow parameters. pub flow_params: FlowParams, /// Initial capabilities. @@ -175,9 +210,9 @@ pub struct Params { // Locks must be acquired in the order declared, and when holding a read lock // on the peers, only one peer may be held at a time. pub struct LightProtocol { - provider: Box, + provider: Arc, genesis_hash: H256, - network_id: NetworkId, + network_id: u64, pending_peers: RwLock>, peers: RwLock>>, pending_requests: RwLock>, @@ -189,10 +224,13 @@ pub struct LightProtocol { impl LightProtocol { /// Create a new instance of the protocol manager. - pub fn new(provider: Box, params: Params) -> Self { + pub fn new(provider: Arc, params: Params) -> Self { + debug!(target: "les", "Initializing LES handler"); + + let genesis_hash = provider.chain_info().genesis_hash; LightProtocol { provider: provider, - genesis_hash: params.genesis_hash, + genesis_hash: genesis_hash, network_id: params.network_id, pending_peers: RwLock::new(HashMap::new()), peers: RwLock::new(HashMap::new()), @@ -207,28 +245,37 @@ impl LightProtocol { /// Check the maximum amount of requests of a specific type /// which a peer would be able to serve. pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option { - self.peers.read().get(&peer).map(|peer| { + self.peers.read().get(&peer).and_then(|peer| { let mut peer = peer.lock(); - peer.recharge_remote(); - peer.remote_flow.max_amount(&peer.remote_buffer, kind) + match peer.remote_flow.as_mut() { + Some(&mut (ref mut buf, ref flow)) => { + flow.recharge(buf); + Some(flow.max_amount(&*buf, kind)) + } + None => None, + } }) } /// Make a request to a peer. /// - /// Fails on: nonexistent peer, network error, + /// Fails on: nonexistent peer, network error, peer not server, /// insufficient buffer. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. 
- pub fn request_from(&self, io: &NetworkContext, peer_id: &PeerId, request: Request) -> Result { + pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { let peers = self.peers.read(); let peer = try!(peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)); let mut peer = peer.lock(); - peer.recharge_remote(); - - let max = peer.remote_flow.compute_cost(request.kind(), request.amount()); - try!(peer.remote_buffer.deduct_cost(max)); + match peer.remote_flow.as_mut() { + Some(&mut (ref mut buf, ref flow)) => { + flow.recharge(buf); + let max = flow.compute_cost(request.kind(), request.amount()); + try!(buf.deduct_cost(max)); + } + None => return Err(Error::NotServer), + } let req_id = self.req_id.fetch_add(1, Ordering::SeqCst); let packet_data = encode_request(&request, req_id); @@ -242,12 +289,12 @@ impl LightProtocol { request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, }; - try!(io.send(*peer_id, packet_id, packet_data)); + io.send(*peer_id, packet_id, packet_data); - peer.current_asking.insert(req_id); self.pending_requests.write().insert(req_id, Requested { request: request, timestamp: SteadyTime::now(), + peer_id: *peer_id, }); Ok(ReqId(req_id)) @@ -255,8 +302,9 @@ impl LightProtocol { /// Make an announcement of new chain head and capabilities to all peers. /// The announcement is expected to be valid. - pub fn make_announcement(&self, io: &NetworkContext, mut announcement: Announcement) { + pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) { let mut reorgs_map = HashMap::new(); + let now = SteadyTime::now(); // update stored capabilities self.capabilities.write().update_from(&announcement); @@ -264,6 +312,17 @@ impl LightProtocol { // calculate reorg info and send packets for (peer_id, peer_info) in self.peers.read().iter() { let mut peer_info = peer_info.lock(); + + // TODO: "urgent" announcements like new blocks? + // the timer approach will skip 1 (possibly 2) in rare occasions. + if peer_info.sent_head == announcement.head_hash || + peer_info.status.head_num >= announcement.head_num || + now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) { + continue + } + + peer_info.last_update = now; + let reorg_depth = reorgs_map.entry(peer_info.sent_head) .or_insert_with(|| { match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) { @@ -281,26 +340,133 @@ impl LightProtocol { peer_info.sent_head = announcement.head_hash; announcement.reorg_depth = *reorg_depth; - if let Err(e) = io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement)) { - debug!(target: "les", "Error sending to peer {}: {}", peer_id, e); - } + io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement)); } } /// Add an event handler. /// Ownership will be transferred to the protocol structure, /// and the handler will be kept alive as long as it is. - /// These are intended to be added at the beginning of the + /// These are intended to be added when the protocol structure + /// is initialized as a means of customizing its behavior. pub fn add_handler(&mut self, handler: Box) { self.handlers.push(handler); } + + /// Signal to handlers that network activity is being aborted + /// and clear peer data. + pub fn abort(&self) { + for handler in &self.handlers { + handler.on_abort(); + } + + // acquire in order and hold. 
+ let mut pending_peers = self.pending_peers.write(); + let mut peers = self.peers.write(); + let mut pending_requests = self.pending_requests.write(); + + pending_peers.clear(); + peers.clear(); + pending_requests.clear(); + } + + // Does the common pre-verification of responses before the response itself + // is actually decoded: + // - check whether peer exists + // - check whether request was made + // - check whether request kinds match + fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { + let req_id: usize = try!(raw.val_at(0)); + let cur_buffer: U256 = try!(raw.val_at(1)); + + trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); + + match self.pending_requests.write().remove(&req_id) { + None => return Err(Error::UnsolicitedResponse), + Some(requested) => { + if requested.peer_id != *peer || requested.request.kind() != kind { + return Err(Error::UnsolicitedResponse) + } + } + } + + let peers = self.peers.read(); + match peers.get(peer) { + Some(peer_info) => { + let mut peer_info = peer_info.lock(); + match peer_info.remote_flow.as_mut() { + Some(&mut (ref mut buf, ref mut flow)) => { + let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); + buf.update_to(actual_buffer) + } + None => return Err(Error::NotServer), // this really should be impossible. + } + Ok(ReqId(req_id)) + } + None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. + } + } + + // handle a packet using the given io context. + fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + let rlp = UntrustedRlp::new(data); + + trace!(target: "les", "Incoming packet {} from peer {}", packet_id, peer); + + // handle the packet + let res = match packet_id { + packet::STATUS => self.status(peer, io, rlp), + packet::ANNOUNCE => self.announcement(peer, io, rlp), + + packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), + packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), + + packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), + packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), + + packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), + packet::RECEIPTS => self.receipts(peer, io, rlp), + + packet::GET_PROOFS => self.get_proofs(peer, io, rlp), + packet::PROOFS => self.proofs(peer, io, rlp), + + packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), + packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), + + packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), + packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), + + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), + + other => { + Err(Error::UnrecognizedPacket(other)) + } + }; + + // if something went wrong, figure out how much to punish the peer. + if let Err(e) = res { + match e.punishment() { + Punishment::None => {} + Punishment::Disconnect => { + debug!(target: "les", "Disconnecting peer {}: {}", peer, e); + io.disconnect_peer(*peer) + } + Punishment::Disable => { + debug!(target: "les", "Disabling peer {}: {}", peer, e); + io.disable_peer(*peer) + } + } + } + } } impl LightProtocol { // called when a peer connects. 
- fn on_connect(&self, peer: &PeerId, io: &NetworkContext) { + fn on_connect(&self, peer: &PeerId, io: &IoContext) { let peer = *peer; + trace!(target: "les", "Peer {} connecting", peer); + match self.send_status(peer, io) { Ok(pending_peer) => { self.pending_peers.write().insert(peer, pending_peer); @@ -313,44 +479,69 @@ impl LightProtocol { } // called when a peer disconnects. - fn on_disconnect(&self, peer: PeerId) { - // TODO: reassign all requests assigned to this peer. + fn on_disconnect(&self, peer: PeerId, io: &IoContext) { + trace!(target: "les", "Peer {} disconnecting", peer); + + self.pending_peers.write().remove(&peer); if self.peers.write().remove(&peer).is_some() { + let unfulfilled: Vec<_> = self.pending_requests.read() + .iter() + .filter(|&(_, r)| r.peer_id == peer) + .map(|(&id, _)| ReqId(id)) + .collect(); + + { + let mut pending = self.pending_requests.write(); + for &ReqId(ref inner) in &unfulfilled { + pending.remove(inner); + } + } + for handler in &self.handlers { - handler.on_disconnect(peer) + handler.on_disconnect(&Ctx { + peer: peer, + io: io, + proto: self, + }, &unfulfilled) } } } // send status to a peer. - fn send_status(&self, peer: PeerId, io: &NetworkContext) -> Result { - let chain_info = self.provider.chain_info(); + fn send_status(&self, peer: PeerId, io: &IoContext) -> Result { + let proto_version = try!(io.protocol_version(peer).ok_or(Error::WrongNetwork)); - // TODO: could update capabilities here. + if PROTOCOL_VERSIONS.iter().find(|x| **x == proto_version).is_none() { + return Err(Error::UnsupportedProtocolVersion(proto_version)); + } + + let chain_info = self.provider.chain_info(); let status = Status { head_td: chain_info.total_difficulty, head_hash: chain_info.best_block_hash, head_num: chain_info.best_block_number, genesis_hash: chain_info.genesis_hash, - protocol_version: PROTOCOL_VERSION, + protocol_version: proto_version as u32, // match peer proto version network_id: self.network_id, last_head: None, }; let capabilities = self.capabilities.read().clone(); - let status_packet = status::write_handshake(&status, &capabilities, &self.flow_params); + let status_packet = status::write_handshake(&status, &capabilities, Some(&self.flow_params)); - try!(io.send(peer, packet::STATUS, status_packet)); + io.send(peer, packet::STATUS, status_packet); Ok(PendingPeer { sent_head: chain_info.best_block_hash, + last_update: SteadyTime::now(), + proto_version: proto_version, }) } // Handle status message from peer. 
- fn status(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> { + fn status(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { let pending = match self.pending_peers.write().remove(peer) { Some(pending) => pending, None => { @@ -366,63 +557,80 @@ impl LightProtocol { return Err(Error::WrongNetwork); } + if Some(status.protocol_version as u8) != io.protocol_version(*peer) { + return Err(Error::BadProtocolVersion); + } + + let remote_flow = flow_params.map(|params| (params.create_buffer(), params)); + self.peers.write().insert(*peer, Mutex::new(Peer { local_buffer: self.flow_params.create_buffer(), - remote_buffer: flow_params.create_buffer(), - current_asking: HashSet::new(), status: status.clone(), capabilities: capabilities.clone(), - remote_flow: flow_params, + remote_flow: remote_flow, sent_head: pending.sent_head, + last_update: pending.last_update, + proto_version: pending.proto_version, })); for handler in &self.handlers { - handler.on_connect(*peer, &status, &capabilities) + handler.on_connect(&Ctx { + peer: *peer, + io: io, + proto: self, + }, &status, &capabilities) } Ok(()) } // Handle an announcement. - fn announcement(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> { + fn announcement(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { if !self.peers.read().contains_key(peer) { debug!(target: "les", "Ignoring announcement from unknown peer"); return Ok(()) } let announcement = try!(status::parse_announcement(data)); - let peers = self.peers.read(); - let peer_info = match peers.get(peer) { - Some(info) => info, - None => return Ok(()), - }; - - let mut peer_info = peer_info.lock(); - - // update status. + // scope to ensure locks are dropped before moving into handler-space. { - // TODO: punish peer if they've moved backwards. - let status = &mut peer_info.status; - let last_head = status.head_hash; - status.head_hash = announcement.head_hash; - status.head_td = announcement.head_td; - status.head_num = announcement.head_num; - status.last_head = Some((last_head, announcement.reorg_depth)); + let peers = self.peers.read(); + let peer_info = match peers.get(peer) { + Some(info) => info, + None => return Ok(()), + }; + + let mut peer_info = peer_info.lock(); + + // update status. + { + // TODO: punish peer if they've moved backwards. + let status = &mut peer_info.status; + let last_head = status.head_hash; + status.head_hash = announcement.head_hash; + status.head_td = announcement.head_td; + status.head_num = announcement.head_num; + status.last_head = Some((last_head, announcement.reorg_depth)); + } + + // update capabilities. + peer_info.capabilities.update_from(&announcement); } - // update capabilities. - peer_info.capabilities.update_from(&announcement); - for handler in &self.handlers { - handler.on_announcement(*peer, &announcement); + handler.on_announcement(&Ctx { + peer: *peer, + io: io, + proto: self, + }, &announcement); } Ok(()) } // Handle a request for block headers. - fn get_block_headers(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_HEADERS: usize = 512; let peers = self.peers.read(); @@ -467,16 +675,29 @@ impl LightProtocol { } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for block headers. 
- fn block_headers(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn block_headers(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let req_id = try!(self.pre_verify_response(peer, request::Kind::Headers, &raw)); + let raw_headers: Vec<_> = raw.iter().skip(2).map(|x| x.as_raw().to_owned()).collect(); + + for handler in &self.handlers { + handler.on_block_headers(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_headers); + } + + Ok(()) } // Handle a request for block bodies. - fn get_block_bodies(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_block_bodies(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_BODIES: usize = 256; let peers = self.peers.read(); @@ -513,16 +734,29 @@ impl LightProtocol { } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for block bodies. - fn block_bodies(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn block_bodies(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let req_id = try!(self.pre_verify_response(peer, request::Kind::Bodies, &raw)); + let raw_bodies: Vec = raw.iter().skip(2).map(|x| x.as_raw().to_owned()).collect(); + + for handler in &self.handlers { + handler.on_block_bodies(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_bodies); + } + + Ok(()) } // Handle a request for receipts. - fn get_receipts(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_receipts(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_RECEIPTS: usize = 256; let peers = self.peers.read(); @@ -559,16 +793,33 @@ impl LightProtocol { } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for receipts. - fn receipts(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn receipts(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let req_id = try!(self.pre_verify_response(peer, request::Kind::Receipts, &raw)); + let raw_receipts: Vec> = try!(raw + .iter() + .skip(2) + .map(|x| x.as_val()) + .collect()); + + for handler in &self.handlers { + handler.on_receipts(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_receipts); + } + + Ok(()) } // Handle a request for proofs. - fn get_proofs(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_PROOFS: usize = 128; let peers = self.peers.read(); @@ -616,16 +867,33 @@ impl LightProtocol { } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for proofs. - fn proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let req_id = try!(self.pre_verify_response(peer, request::Kind::StateProofs, &raw)); + + let raw_proofs: Vec> = raw.iter() + .skip(2) + .map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect()) + .collect(); + + for handler in &self.handlers { + handler.on_state_proofs(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_proofs); + } + + Ok(()) } // Handle a request for contract code. 
- fn get_contract_code(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_contract_code(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_CODES: usize = 256; let peers = self.peers.read(); @@ -667,20 +935,34 @@ impl LightProtocol { stream.append(&req_id).append(&cur_buffer); for code in response { - stream.append_raw(&code, 1); + stream.append(&code); } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for contract code. - fn contract_code(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn contract_code(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let req_id = try!(self.pre_verify_response(peer, request::Kind::Codes, &raw)); + + let raw_code: Vec = try!(raw.iter().skip(2).map(|x| x.as_val()).collect()); + + for handler in &self.handlers { + handler.on_code(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_code); + } + + Ok(()) } // Handle a request for header proofs - fn get_header_proofs(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + fn get_header_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_PROOFS: usize = 256; let peers = self.peers.read(); @@ -727,16 +1009,37 @@ impl LightProtocol { } stream.out() - }).map_err(Into::into) + }); + + Ok(()) } // Receive a response for header proofs - fn header_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + fn header_proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + fn decode_res(raw: UntrustedRlp) -> Result<(Bytes, Vec), ::rlp::DecoderError> { + Ok(( + try!(raw.val_at(0)), + try!(raw.at(1)).iter().map(|x| x.as_raw().to_owned()).collect(), + )) + } + + + let req_id = try!(self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)); + let raw_proofs: Vec<_> = try!(raw.iter().skip(2).map(decode_res).collect()); + + for handler in &self.handlers { + handler.on_header_proofs(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_proofs); + } + + Ok(()) } // Receive a set of transactions to relay. 
- fn relay_transactions(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> { + fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_TRANSACTIONS: usize = 256; let txs: Vec<_> = try!(data.iter().take(MAX_TRANSACTIONS).map(|x| x.as_val::()).collect()); @@ -744,7 +1047,11 @@ impl LightProtocol { debug!(target: "les", "Received {} transactions to relay from peer {}", txs.len(), peer); for handler in &self.handlers { - handler.on_transactions(*peer, &txs); + handler.on_transactions(&Ctx { + peer: *peer, + io: io, + proto: self, + }, &txs); } Ok(()) @@ -757,60 +1064,15 @@ impl NetworkProtocolHandler for LightProtocol { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - let rlp = UntrustedRlp::new(data); - - // handle the packet - let res = match packet_id { - packet::STATUS => self.status(peer, rlp), - packet::ANNOUNCE => self.announcement(peer, rlp), - - packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), - packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), - - packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), - packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), - - packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), - packet::RECEIPTS => self.receipts(peer, io, rlp), - - packet::GET_PROOFS => self.get_proofs(peer, io, rlp), - packet::PROOFS => self.proofs(peer, io, rlp), - - packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), - packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), - - packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), - packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), - - packet::SEND_TRANSACTIONS => self.relay_transactions(peer, rlp), - - other => { - Err(Error::UnrecognizedPacket(other)) - } - }; - - // if something went wrong, figure out how much to punish the peer. - if let Err(e) = res { - match e.punishment() { - Punishment::None => {} - Punishment::Disconnect => { - debug!(target: "les", "Disconnecting peer {}: {}", peer, e); - io.disconnect_peer(*peer) - } - Punishment::Disable => { - debug!(target: "les", "Disabling peer {}: {}", peer, e); - io.disable_peer(*peer) - } - } - } + self.handle_packet(io, peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { self.on_connect(peer, io); } - fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { - self.on_disconnect(*peer); + fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + self.on_disconnect(*peer, io); } fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 2c0c5f79a..59981b88d 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -82,26 +82,6 @@ impl Key { } } -/// Network ID structure. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u32)] -pub enum NetworkId { - /// ID for the mainnet - Mainnet = 1, - /// ID for the testnet - Testnet = 0, -} - -impl NetworkId { - fn from_raw(raw: u32) -> Option { - match raw { - 0 => Some(NetworkId::Testnet), - 1 => Some(NetworkId::Mainnet), - _ => None, - } - } -} - // helper for decoding key-value pairs in the handshake or an announcement. struct Parser<'a> { pos: usize, @@ -118,6 +98,7 @@ impl<'a> Parser<'a> { // expect a specific next key, and get the value's RLP. // if the key isn't found, the position isn't advanced. 
fn expect_raw(&mut self, key: Key) -> Result, DecoderError> { + trace!(target: "les", "Expecting key {}", key.as_str()); let pre_pos = self.pos; if let Some((k, val)) = try!(self.get_next()) { if k == key { return Ok(val) } @@ -164,7 +145,7 @@ pub struct Status { /// Protocol version. pub protocol_version: u32, /// Network id of this peer. - pub network_id: NetworkId, + pub network_id: u64, /// Total difficulty of the head of the chain. pub head_td: U256, /// Hash of the best block. @@ -217,7 +198,7 @@ impl Capabilities { /// - chain status /// - serving capabilities /// - buffer flow parameters -pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, FlowParams), DecoderError> { +pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, Option), DecoderError> { let mut parser = Parser { pos: 0, rlp: rlp, @@ -225,8 +206,7 @@ pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, FlowP let status = Status { protocol_version: try!(parser.expect(Key::ProtocolVersion)), - network_id: try!(parser.expect(Key::NetworkId) - .and_then(|id: u32| NetworkId::from_raw(id).ok_or(DecoderError::Custom("Invalid network ID")))), + network_id: try!(parser.expect(Key::NetworkId)), head_td: try!(parser.expect(Key::HeadTD)), head_hash: try!(parser.expect(Key::HeadHash)), head_num: try!(parser.expect(Key::HeadNum)), @@ -241,20 +221,23 @@ pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, FlowP tx_relay: parser.expect_raw(Key::TxRelay).is_ok(), }; - let flow_params = FlowParams::new( - try!(parser.expect(Key::BufferLimit)), - try!(parser.expect(Key::BufferCostTable)), - try!(parser.expect(Key::BufferRechargeRate)), - ); + let flow_params = match ( + parser.expect(Key::BufferLimit), + parser.expect(Key::BufferCostTable), + parser.expect(Key::BufferRechargeRate) + ) { + (Ok(bl), Ok(bct), Ok(brr)) => Some(FlowParams::new(bl, bct, brr)), + _ => None, + }; Ok((status, capabilities, flow_params)) } /// Write a handshake, given status, capabilities, and flow parameters. 
-pub fn write_handshake(status: &Status, capabilities: &Capabilities, flow_params: &FlowParams) -> Vec { +pub fn write_handshake(status: &Status, capabilities: &Capabilities, flow_params: Option<&FlowParams>) -> Vec { let mut pairs = Vec::new(); pairs.push(encode_pair(Key::ProtocolVersion, &status.protocol_version)); - pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u32))); + pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u64))); pairs.push(encode_pair(Key::HeadTD, &status.head_td)); pairs.push(encode_pair(Key::HeadHash, &status.head_hash)); pairs.push(encode_pair(Key::HeadNum, &status.head_num)); @@ -273,9 +256,11 @@ pub fn write_handshake(status: &Status, capabilities: &Capabilities, flow_params pairs.push(encode_flag(Key::TxRelay)); } - pairs.push(encode_pair(Key::BufferLimit, flow_params.limit())); - pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table())); - pairs.push(encode_pair(Key::BufferRechargeRate, flow_params.recharge_rate())); + if let Some(flow_params) = flow_params { + pairs.push(encode_pair(Key::BufferLimit, flow_params.limit())); + pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table())); + pairs.push(encode_pair(Key::BufferRechargeRate, flow_params.recharge_rate())); + } let mut stream = RlpStream::new_list(pairs.len()); @@ -385,7 +370,7 @@ mod tests { fn full_handshake() { let status = Status { protocol_version: 1, - network_id: NetworkId::Mainnet, + network_id: 1, head_td: U256::default(), head_hash: H256::default(), head_num: 10, @@ -406,21 +391,21 @@ mod tests { 1000.into(), ); - let handshake = write_handshake(&status, &capabilities, &flow_params); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); let (read_status, read_capabilities, read_flow) = parse_handshake(UntrustedRlp::new(&handshake)).unwrap(); assert_eq!(read_status, status); assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow, flow_params); + assert_eq!(read_flow.unwrap(), flow_params); } #[test] fn partial_handshake() { let status = Status { protocol_version: 1, - network_id: NetworkId::Mainnet, + network_id: 1, head_td: U256::default(), head_hash: H256::default(), head_num: 10, @@ -441,21 +426,21 @@ mod tests { 1000.into(), ); - let handshake = write_handshake(&status, &capabilities, &flow_params); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); let (read_status, read_capabilities, read_flow) = parse_handshake(UntrustedRlp::new(&handshake)).unwrap(); assert_eq!(read_status, status); assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow, flow_params); + assert_eq!(read_flow.unwrap(), flow_params); } #[test] fn skip_unknown_keys() { let status = Status { protocol_version: 1, - network_id: NetworkId::Mainnet, + network_id: 1, head_td: U256::default(), head_hash: H256::default(), head_num: 10, @@ -476,7 +461,7 @@ mod tests { 1000.into(), ); - let handshake = write_handshake(&status, &capabilities, &flow_params); + let handshake = write_handshake(&status, &capabilities, Some(&flow_params)); let interleaved = { let handshake = UntrustedRlp::new(&handshake); let mut stream = RlpStream::new_list(handshake.item_count() * 3); @@ -498,7 +483,7 @@ mod tests { assert_eq!(read_status, status); assert_eq!(read_capabilities, capabilities); - assert_eq!(read_flow, flow_params); + assert_eq!(read_flow.unwrap(), flow_params); } #[test] @@ -548,4 +533,33 @@ mod tests { let out = stream.drain(); assert!(parse_announcement(UntrustedRlp::new(&out)).is_ok()); } + + #[test] + fn 
optional_flow() { + let status = Status { + protocol_version: 1, + network_id: 1, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; + + let capabilities = Capabilities { + serve_headers: true, + serve_chain_since: Some(5), + serve_state_since: Some(8), + tx_relay: true, + }; + + let handshake = write_handshake(&status, &capabilities, None); + + let (read_status, read_capabilities, read_flow) + = parse_handshake(UntrustedRlp::new(&handshake)).unwrap(); + + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert!(read_flow.is_none()); + } } \ No newline at end of file diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs new file mode 100644 index 000000000..876432ce2 --- /dev/null +++ b/ethcore/light/src/net/tests/mod.rs @@ -0,0 +1,512 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tests for the `LightProtocol` implementation. +//! These don't test of the higher level logic on top of + +use ethcore::blockchain_info::BlockChainInfo; +use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient}; +use ethcore::ids::BlockId; +use ethcore::transaction::SignedTransaction; +use network::PeerId; + +use net::buffer_flow::FlowParams; +use net::context::IoContext; +use net::status::{Capabilities, Status, write_handshake}; +use net::{encode_request, LightProtocol, Params, packet}; +use provider::Provider; +use request::{self, Request, Headers}; + +use rlp::*; +use util::{Bytes, H256, U256}; + +use std::sync::Arc; + +// expected result from a call. +#[derive(Debug, PartialEq, Eq)] +enum Expect { + /// Expect to have message sent to peer. + Send(PeerId, u8, Vec), + /// Expect this response. + Respond(u8, Vec), + /// Expect a punishment (disconnect/disable) + Punish(PeerId), + /// Expect nothing. + Nothing, +} + +impl IoContext for Expect { + fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { + assert_eq!(self, &Expect::Send(peer, packet_id, packet_body)); + } + + fn respond(&self, packet_id: u8, packet_body: Vec) { + assert_eq!(self, &Expect::Respond(packet_id, packet_body)); + } + + fn disconnect_peer(&self, peer: PeerId) { + assert_eq!(self, &Expect::Punish(peer)); + } + + fn disable_peer(&self, peer: PeerId) { + assert_eq!(self, &Expect::Punish(peer)); + } + + fn protocol_version(&self, _peer: PeerId) -> Option { + Some(super::MAX_PROTOCOL_VERSION) + } +} + +// can't implement directly for Arc due to cross-crate orphan rules. 
+struct TestProvider(Arc); + +struct TestProviderInner { + client: TestBlockChainClient, +} + +impl Provider for TestProvider { + fn chain_info(&self) -> BlockChainInfo { + self.0.client.chain_info() + } + + fn reorg_depth(&self, a: &H256, b: &H256) -> Option { + self.0.client.tree_route(a, b).map(|route| route.index as u64) + } + + fn earliest_state(&self) -> Option { + None + } + + fn block_headers(&self, req: request::Headers) -> Vec { + let best_num = self.0.client.chain_info().best_block_number; + let start_num = req.block_num; + + match self.0.client.block_hash(BlockId::Number(req.block_num)) { + Some(hash) if hash == req.block_hash => {} + _=> { + trace!(target: "les_provider", "unknown/non-canonical start block in header request: {:?}", (req.block_num, req.block_hash)); + return vec![] + } + } + + (0u64..req.max as u64) + .map(|x: u64| x.saturating_mul(req.skip + 1)) + .take_while(|x| if req.reverse { x < &start_num } else { best_num - start_num >= *x }) + .map(|x| if req.reverse { start_num - x } else { start_num + x }) + .map(|x| self.0.client.block_header(BlockId::Number(x))) + .take_while(|x| x.is_some()) + .flat_map(|x| x) + .collect() + } + + fn block_bodies(&self, req: request::Bodies) -> Vec { + req.block_hashes.into_iter() + .map(|hash| self.0.client.block_body(BlockId::Hash(hash))) + .map(|body| body.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) + .collect() + } + + fn receipts(&self, req: request::Receipts) -> Vec { + req.block_hashes.into_iter() + .map(|hash| self.0.client.block_receipts(&hash)) + .map(|receipts| receipts.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) + .collect() + } + + fn proofs(&self, req: request::StateProofs) -> Vec { + req.requests.into_iter() + .map(|req| { + match req.key2 { + Some(_) => ::util::sha3::SHA3_NULL_RLP.to_vec(), + None => { + // sort of a leaf node + let mut stream = RlpStream::new_list(2); + stream.append(&req.key1).append_empty_data(); + stream.out() + } + } + }) + .collect() + } + + fn contract_code(&self, req: request::ContractCodes) -> Vec { + req.code_requests.into_iter() + .map(|req| { + req.account_key.iter().chain(req.account_key.iter()).cloned().collect() + }) + .collect() + } + + fn header_proofs(&self, req: request::HeaderProofs) -> Vec { + req.requests.into_iter().map(|_| ::rlp::EMPTY_LIST_RLP.to_vec()).collect() + } + + fn pending_transactions(&self) -> Vec { + self.0.client.pending_transactions() + } +} + +fn make_flow_params() -> FlowParams { + FlowParams::new(5_000_000.into(), Default::default(), 100_000.into()) +} + +fn capabilities() -> Capabilities { + Capabilities { + serve_headers: true, + serve_chain_since: Some(1), + serve_state_since: Some(1), + tx_relay: true, + } +} + +// helper for setting up the protocol handler and provider. 
+fn setup(flow_params: FlowParams, capabilities: Capabilities) -> (Arc, LightProtocol) { + let provider = Arc::new(TestProviderInner { + client: TestBlockChainClient::new(), + }); + + let proto = LightProtocol::new(Arc::new(TestProvider(provider.clone())), Params { + network_id: 2, + flow_params: flow_params, + capabilities: capabilities, + }); + + (provider, proto) +} + +fn status(chain_info: BlockChainInfo) -> Status { + Status { + protocol_version: 1, + network_id: 2, + head_td: chain_info.total_difficulty, + head_hash: chain_info.best_block_hash, + head_num: chain_info.best_block_number, + genesis_hash: chain_info.genesis_hash, + last_head: None, + } +} + +#[test] +fn handshake_expected() { + let flow_params = make_flow_params(); + let capabilities = capabilities(); + + let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + + let status = status(provider.client.chain_info()); + + let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); +} + +#[test] +#[should_panic] +fn genesis_mismatch() { + let flow_params = make_flow_params(); + let capabilities = capabilities(); + + let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + + let mut status = status(provider.client.chain_info()); + status.genesis_hash = H256::default(); + + let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); +} + +#[test] +fn buffer_overflow() { + let flow_params = make_flow_params(); + let capabilities = capabilities(); + + let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + + let status = status(provider.client.chain_info()); + + { + let packet_body = write_handshake(&status, &capabilities, Some(&flow_params)); + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body)); + } + + { + let my_status = write_handshake(&status, &capabilities, Some(&flow_params)); + proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); + } + + // 1000 requests is far too many for the default flow params. + let request = encode_request(&Request::Headers(Headers { + block_num: 1, + block_hash: provider.client.chain_info().genesis_hash, + max: 1000, + skip: 0, + reverse: false, + }), 111); + + proto.handle_packet(&Expect::Punish(1), &1, packet::GET_BLOCK_HEADERS, &request); +} + +// test the basic request types -- these just make sure that requests are parsed +// and sent to the provider correctly as well as testing response formatting. 
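[Editor's note] The tests that follow exercise the provider (serving) side of each request type. For orientation, here is a minimal sketch of the consuming side: a `Handler` implementation of the kind meant to be registered via `LightProtocol::add_handler`. This is not part of the diff; the `ethcore_light::...` import paths and the `HeaderCollector` type are illustrative assumptions, but the trait and struct signatures follow the definitions added in `net/mod.rs`, `net/context.rs`, and `types`.

```rust
use std::sync::Mutex;

// Assumed external paths for the items added in this diff.
use ethcore_light::net::{Capabilities, EventContext, Handler, ReqId, Status};
use ethcore_light::request::{self, Request};

/// Hypothetical handler: asks each connecting peer for recent headers and
/// stores whatever raw header RLP comes back.
#[derive(Default)]
struct HeaderCollector {
	// std Mutex keeps the handler Send + Sync, as the Handler trait requires.
	headers: Mutex<Vec<Vec<u8>>>,
}

impl Handler for HeaderCollector {
	fn on_connect(&self, ctx: &EventContext, status: &Status, _caps: &Capabilities) {
		// Request a small batch of headers ending at the peer's announced head.
		// The ReqId returned by request_from is the one later passed back to
		// on_block_headers when (and if) the peer responds.
		let req = Request::Headers(request::Headers {
			block_num: status.head_num,
			block_hash: status.head_hash,
			max: 32,
			skip: 0,
			reverse: true,
		});

		// Errors (unknown peer, Error::NotServer, insufficient buffer) are
		// simply ignored in this sketch.
		let _ = ctx.request_from(ctx.peer(), req);
	}

	fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, headers: &[Vec<u8>]) {
		// Responses are unverified raw RLP; validation is left to the handler.
		self.headers.lock().unwrap().extend_from_slice(headers);
	}
}

// Registration (before wiring the protocol into the network layer):
//   let mut proto = LightProtocol::new(provider, params);
//   proto.add_handler(Box::new(HeaderCollector::default()));
```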
+
+#[test]
+fn get_block_headers() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+	let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+
+	provider.client.add_blocks(100, EachBlockWith::Nothing);
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status);
+	}
+
+	let request = Headers {
+		block_num: 1,
+		block_hash: provider.client.block_hash(BlockId::Number(1)).unwrap(),
+		max: 10,
+		skip: 0,
+		reverse: false,
+	};
+	let req_id = 111;
+
+	let request_body = encode_request(&Request::Headers(request.clone()), req_id);
+	let response = {
+		let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect();
+		assert_eq!(headers.len(), 10);
+
+		let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10);
+
+		let mut response_stream = RlpStream::new_list(12);
+
+		response_stream.append(&req_id).append(&new_buf);
+		for header in headers {
+			response_stream.append_raw(&header, 1);
+		}
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::BLOCK_HEADERS, response);
+	proto.handle_packet(&expected, &1, packet::GET_BLOCK_HEADERS, &request_body);
+}
+
+#[test]
+fn get_block_bodies() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+	let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+
+	provider.client.add_blocks(100, EachBlockWith::Nothing);
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status);
+	}
+
+	let request = request::Bodies {
+		block_hashes: (0..10).map(|i| provider.client.block_hash(BlockId::Number(i)).unwrap()).collect(),
+	};
+
+	let req_id = 111;
+
+	let request_body = encode_request(&Request::Bodies(request.clone()), req_id);
+	let response = {
+		let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect();
+		assert_eq!(bodies.len(), 10);
+
+		let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10);
+
+		let mut response_stream = RlpStream::new_list(12);
+
+		response_stream.append(&req_id).append(&new_buf);
+		for body in bodies {
+			response_stream.append_raw(&body, 1);
+		}
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::BLOCK_BODIES, response);
+	proto.handle_packet(&expected, &1, packet::GET_BLOCK_BODIES, &request_body);
+}
+
+#[test]
+fn get_block_receipts() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+	let my_status = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+
+	provider.client.add_blocks(1000, EachBlockWith::Nothing);
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status);
+	}
+
+	// find the first 10 block hashes starting with `f` because receipts are only provided
+	// by the test client in that case.
+	let block_hashes: Vec<_> = (0..1000).map(|i| provider.client.block_hash(BlockId::Number(i)).unwrap())
+		.filter(|hash| format!("{}", hash).starts_with("f")).take(10).collect();
+
+	let request = request::Receipts {
+		block_hashes: block_hashes.clone(),
+	};
+
+	let req_id = 111;
+
+	let request_body = encode_request(&Request::Receipts(request.clone()), req_id);
+	let response = {
+		let receipts: Vec<_> = block_hashes.iter()
+			.map(|hash| provider.client.block_receipts(hash).unwrap())
+			.collect();
+
+		let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len());
+
+		let mut response_stream = RlpStream::new_list(2 + receipts.len());
+
+		response_stream.append(&req_id).append(&new_buf);
+		for block_receipts in receipts {
+			response_stream.append_raw(&block_receipts, 1);
+		}
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::RECEIPTS, response);
+	proto.handle_packet(&expected, &1, packet::GET_RECEIPTS, &request_body);
+}
+
+#[test]
+fn get_state_proofs() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body);
+	}
+
+	let req_id = 112;
+	let key1 = U256::from(11223344).into();
+	let key2 = U256::from(99988887).into();
+
+	let request = Request::StateProofs (request::StateProofs {
+		requests: vec![
+			request::StateProof { block: H256::default(), key1: key1, key2: None, from_level: 0 },
+			request::StateProof { block: H256::default(), key1: key1, key2: Some(key2), from_level: 0 },
+		]
+	});
+
+	let request_body = encode_request(&request, req_id);
+	let response = {
+		let proofs = vec![
+			{ let mut stream = RlpStream::new_list(2); stream.append(&key1).append_empty_data(); stream.out() },
+			::util::sha3::SHA3_NULL_RLP.to_vec(),
+		];
+
+		let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2);
+
+		let mut response_stream = RlpStream::new_list(4);
+
+		response_stream.append(&req_id).append(&new_buf);
+		for proof in proofs {
+			response_stream.append_raw(&proof, 1);
+		}
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::PROOFS, response);
+	proto.handle_packet(&expected, &1, packet::GET_PROOFS, &request_body);
+}
+
+#[test]
+fn get_contract_code() {
+	let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into());
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(flow_params.clone(), capabilities.clone());
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params));
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body);
+	}
+
+	let req_id = 112;
+	let key1 = U256::from(11223344).into();
+	let key2 = U256::from(99988887).into();
+
+	let request = Request::Codes (request::ContractCodes {
+		code_requests: vec![
+			request::ContractCode { block_hash: H256::default(), account_key: key1 },
+			request::ContractCode { block_hash: H256::default(), account_key: key2 },
+		],
+	});
+
+	let request_body = encode_request(&request, req_id);
+	let response = {
+		let codes: Vec<Vec<u8>> = vec![
+			key1.iter().chain(key1.iter()).cloned().collect(),
+			key2.iter().chain(key2.iter()).cloned().collect(),
+		];
+
+		let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2);
+
+		let mut response_stream = RlpStream::new_list(4);
+
+		response_stream.append(&req_id).append(&new_buf);
+		for code in codes {
+			response_stream.append(&code);
+		}
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::CONTRACT_CODES, response);
+	proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body);
+}
\ No newline at end of file
diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs
index 264df0397..ad8d8ea16 100644
--- a/ethcore/light/src/provider.rs
+++ b/ethcore/light/src/provider.rs
@@ -20,7 +20,7 @@ use ethcore::blockchain_info::BlockChainInfo;
 use ethcore::client::{BlockChainClient, ProvingBlockChainClient};
 use ethcore::transaction::SignedTransaction;
-use ethcore::ids::BlockID;
+use ethcore::ids::BlockId;
 
 use util::{Bytes, H256};
 
@@ -33,6 +33,7 @@ use request;
 /// or empty vector where appropriate.
 ///
 /// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
+#[cfg_attr(feature = "ipc", ipc(client_ident="LightProviderClient"))]
 pub trait Provider: Send + Sync {
 	/// Provide current blockchain info.
 	fn chain_info(&self) -> BlockChainInfo;
@@ -71,7 +72,10 @@ pub trait Provider: Send + Sync {
 	/// Each item in the resulting vector is either the raw bytecode or empty.
 	fn contract_code(&self, req: request::ContractCodes) -> Vec<Bytes>;
 
-	/// Provide header proofs from the Canonical Hash Tries.
+	/// Provide header proofs from the Canonical Hash Tries as well as the headers
+	/// they correspond to -- each element in the returned vector is a 2-tuple.
+	/// The first element is a block header and the second a merkle proof of
+	/// the header in a requested CHT.
 	fn header_proofs(&self, req: request::HeaderProofs) -> Vec<Bytes>;
 
 	/// Provide pending transactions.
@@ -96,7 +100,7 @@ impl Provider for T { let best_num = self.chain_info().best_block_number; let start_num = req.block_num; - match self.block_hash(BlockID::Number(req.block_num)) { + match self.block_hash(BlockId::Number(req.block_num)) { Some(hash) if hash == req.block_hash => {} _=> { trace!(target: "les_provider", "unknown/non-canonical start block in header request: {:?}", (req.block_num, req.block_hash)); @@ -105,10 +109,10 @@ impl Provider for T { } (0u64..req.max as u64) - .map(|x: u64| x.saturating_mul(req.skip)) - .take_while(|x| if req.reverse { x < &start_num } else { best_num - start_num < *x }) + .map(|x: u64| x.saturating_mul(req.skip + 1)) + .take_while(|x| if req.reverse { x < &start_num } else { best_num - start_num >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) - .map(|x| self.block_header(BlockID::Number(x))) + .map(|x| self.block_header(BlockId::Number(x))) .take_while(|x| x.is_some()) .flat_map(|x| x) .collect() @@ -116,7 +120,7 @@ impl Provider for T { fn block_bodies(&self, req: request::Bodies) -> Vec { req.block_hashes.into_iter() - .map(|hash| self.block_body(BlockID::Hash(hash))) + .map(|hash| self.block_body(BlockId::Hash(hash))) .map(|body| body.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) .collect() } @@ -135,8 +139,8 @@ impl Provider for T { for request in req.requests { let proof = match request.key2 { - Some(key2) => self.prove_storage(request.key1, key2, request.from_level, BlockID::Hash(request.block)), - None => self.prove_account(request.key1, request.from_level, BlockID::Hash(request.block)), + Some(key2) => self.prove_storage(request.key1, key2, request.from_level, BlockId::Hash(request.block)), + None => self.prove_account(request.key1, request.from_level, BlockId::Hash(request.block)), }; let mut stream = RlpStream::new_list(proof.len()); @@ -153,7 +157,7 @@ impl Provider for T { fn contract_code(&self, req: request::ContractCodes) -> Vec { req.code_requests.into_iter() .map(|req| { - self.code_by_hash(req.account_key, BlockID::Hash(req.block_hash)) + self.code_by_hash(req.account_key, BlockId::Hash(req.block_hash)) }) .collect() } diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs index d0de080ee..49bd2e9cc 100644 --- a/ethcore/light/src/types/les_request.rs +++ b/ethcore/light/src/types/les_request.rs @@ -19,7 +19,8 @@ use util::H256; /// A request for block headers. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct Headers { /// Starting block number pub block_num: u64, @@ -35,7 +36,8 @@ pub struct Headers { } /// A request for specific block bodies. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct Bodies { /// Hashes which bodies are being requested for. pub block_hashes: Vec @@ -45,14 +47,16 @@ pub struct Bodies { /// /// This request is answered with a list of transaction receipts for each block /// requested. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct Receipts { /// Block hashes to return receipts for. pub block_hashes: Vec, } /// A request for a state proof -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct StateProof { /// Block hash to query state from. 
pub block: H256, @@ -66,14 +70,16 @@ pub struct StateProof { } /// A request for state proofs. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct StateProofs { /// All the proof requests. pub requests: Vec, } /// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct ContractCode { /// Block hash pub block_hash: H256, @@ -82,14 +88,16 @@ pub struct ContractCode { } /// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct ContractCodes { /// Block hash and account key (== sha3(address)) pairs to fetch code for. pub code_requests: Vec, } /// A request for a header proof from the Canonical Hash Trie. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct HeaderProof { /// Number of the CHT. pub cht_number: u64, @@ -100,14 +108,16 @@ pub struct HeaderProof { } /// A request for header proofs from the CHT. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub struct HeaderProofs { /// All the proof requests. pub requests: Vec, } /// Kinds of requests. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub enum Kind { /// Requesting headers. Headers, @@ -124,7 +134,8 @@ pub enum Kind { } /// Encompasses all possible types of requests in a single structure. -#[derive(Debug, Clone, PartialEq, Eq, Binary)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", derive(Binary))] pub enum Request { /// Requesting headers. Headers(Headers), diff --git a/ethcore/light/src/types/mod.rs b/ethcore/light/src/types/mod.rs index 2625358a3..d7f473553 100644 --- a/ethcore/light/src/types/mod.rs +++ b/ethcore/light/src/types/mod.rs @@ -15,6 +15,11 @@ // along with Parity. If not, see . //! Types used in the public (IPC) api which require custom code generation. 
+#![cfg_attr(feature = "ipc", allow(dead_code, unused_assignments, unused_variables))] // codegen issues -#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues + +#[cfg(feature = "ipc")] include!(concat!(env!("OUT_DIR"), "/mod.rs.in")); + +#[cfg(not(feature = "ipc"))] +include!("mod.rs.in"); \ No newline at end of file diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json index 2c520c46a..3a9dce456 100644 --- a/ethcore/res/ethereum/frontier.json +++ b/ethcore/res/ethereum/frontier.json @@ -170,9 +170,6 @@ "enode://cadc6e573b6bc2a9128f2f635ac0db3353e360b56deef239e9be7e7fce039502e0ec670b595f6288c0d2116812516ad6b6ff8d5728ff45eba176989e40dead1e@37.128.191.230:30303", "enode://595a9a06f8b9bc9835c8723b6a82105aea5d55c66b029b6d44f229d6d135ac3ecdd3e9309360a961ea39d7bee7bac5d03564077a4e08823acc723370aace65ec@46.20.235.22:30303", "enode://029178d6d6f9f8026fc0bc17d5d1401aac76ec9d86633bba2320b5eed7b312980c0a210b74b20c4f9a8b0b2bf884b111fa9ea5c5f916bb9bbc0e0c8640a0f56c@216.158.85.185:30303", - "enode://84f5d5957b4880a8b0545e32e05472318898ad9fc8ebe1d56c90c12334a98e12351eccfdf3a2bf72432ac38b57e9d348400d17caa083879ade3822390f89773f@10.1.52.78:30303", - "enode://f90dc9b9bf7b8db97726b7849e175f1eb2707f3d8f281c929336e398dd89b0409fc6aeceb89e846278e9d3ecc3857cebfbe6758ff352ece6fe5d42921ee761db@10.1.173.87:30303", - "enode://6a868ced2dec399c53f730261173638a93a40214cf299ccf4d42a76e3fa54701db410669e8006347a4b3a74fa090bb35af0320e4bc8d04cf5b7f582b1db285f5@10.3.149.199:30303", "enode://fdd1b9bb613cfbc200bba17ce199a9490edc752a833f88d4134bf52bb0d858aa5524cb3ec9366c7a4ef4637754b8b15b5dc913e4ed9fdb6022f7512d7b63f181@212.47.247.103:30303", "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index a2c83f1ce..59fbda045 100644 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -213,7 +213,7 @@ impl AccountProvider { Ok(AccountMeta { name: try!(self.sstore.name(&account)), meta: try!(self.sstore.meta(&account)), - uuid: self.sstore.uuid(&account).ok().map(Into::into), // allowed to not have a UUID + uuid: self.sstore.uuid(&account).ok().map(Into::into), // allowed to not have a Uuid }) } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 0e8e0a271..c69cf3336 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -146,7 +146,7 @@ pub trait BlockProvider { } #[derive(Debug, Hash, Eq, PartialEq, Clone)] -enum CacheID { +enum CacheId { BlockHeader(H256), BlockBody(H256), BlockDetails(H256), @@ -160,7 +160,7 @@ impl bc::group::BloomGroupDatabase for BlockChain { fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option { let position = LogGroupPosition::from(position.clone()); let result = self.db.read_with_cache(db::COL_EXTRA, &self.blocks_blooms, &position).map(Into::into); - self.cache_man.lock().note_used(CacheID::BlocksBlooms(position)); + self.cache_man.lock().note_used(CacheId::BlocksBlooms(position)); result } } @@ -193,7 +193,7 @@ pub struct BlockChain { db: Arc, - cache_man: Mutex>, + cache_man: Mutex>, pending_best_block: RwLock>, pending_block_hashes: RwLock>, @@ -270,7 +270,7 @@ impl 
BlockProvider for BlockChain { None => None }; - self.cache_man.lock().note_used(CacheID::BlockHeader(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockHeader(hash.clone())); result } @@ -306,7 +306,7 @@ impl BlockProvider for BlockChain { None => None }; - self.cache_man.lock().note_used(CacheID::BlockBody(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockBody(hash.clone())); result } @@ -314,28 +314,28 @@ impl BlockProvider for BlockChain { /// Get the familial details concerning a block. fn block_details(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_details, hash); - self.cache_man.lock().note_used(CacheID::BlockDetails(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockDetails(hash.clone())); result } /// Get the hash of given block's number. fn block_hash(&self, index: BlockNumber) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_hashes, &index); - self.cache_man.lock().note_used(CacheID::BlockHashes(index)); + self.cache_man.lock().note_used(CacheId::BlockHashes(index)); result } /// Get the address of transaction with given hash. fn transaction_address(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.transaction_addresses, hash); - self.cache_man.lock().note_used(CacheID::TransactionAddresses(hash.clone())); + self.cache_man.lock().note_used(CacheId::TransactionAddresses(hash.clone())); result } /// Get receipts of block with given hash. fn block_receipts(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_receipts, hash); - self.cache_man.lock().note_used(CacheID::BlockReceipts(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockReceipts(hash.clone())); result } @@ -809,7 +809,7 @@ impl BlockChain { let mut write_details = self.block_details.write(); batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, update, CacheUpdatePolicy::Overwrite); - self.cache_man.lock().note_used(CacheID::BlockDetails(block_hash)); + self.cache_man.lock().note_used(CacheId::BlockDetails(block_hash)); } #[cfg_attr(feature="dev", allow(similar_names))] @@ -968,15 +968,15 @@ impl BlockChain { let mut cache_man = self.cache_man.lock(); for n in pending_hashes_keys { - cache_man.note_used(CacheID::BlockHashes(n)); + cache_man.note_used(CacheId::BlockHashes(n)); } for hash in enacted_txs_keys { - cache_man.note_used(CacheID::TransactionAddresses(hash)); + cache_man.note_used(CacheId::TransactionAddresses(hash)); } for hash in pending_block_hashes { - cache_man.note_used(CacheID::BlockDetails(hash)); + cache_man.note_used(CacheId::BlockDetails(hash)); } } @@ -1244,13 +1244,13 @@ impl BlockChain { cache_man.collect_garbage(current_size, | ids | { for id in &ids { match *id { - CacheID::BlockHeader(ref h) => { block_headers.remove(h); }, - CacheID::BlockBody(ref h) => { block_bodies.remove(h); }, - CacheID::BlockDetails(ref h) => { block_details.remove(h); } - CacheID::BlockHashes(ref h) => { block_hashes.remove(h); } - CacheID::TransactionAddresses(ref h) => { transaction_addresses.remove(h); } - CacheID::BlocksBlooms(ref h) => { blocks_blooms.remove(h); } - CacheID::BlockReceipts(ref h) => { block_receipts.remove(h); } + CacheId::BlockHeader(ref h) => { block_headers.remove(h); }, + CacheId::BlockBody(ref h) => { block_bodies.remove(h); }, + CacheId::BlockDetails(ref h) => { block_details.remove(h); } + CacheId::BlockHashes(ref h) => { block_hashes.remove(h); } + 
CacheId::TransactionAddresses(ref h) => { transaction_addresses.remove(h); } + CacheId::BlocksBlooms(ref h) => { blocks_blooms.remove(h); } + CacheId::BlockReceipts(ref h) => { block_receipts.remove(h); } } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 30b5b4653..f14a47bdc 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -50,9 +50,9 @@ use log_entry::LocalizedLogEntry; use verification::queue::BlockQueue; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{ - BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, + BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, - ChainNotify, PruningInfo, ProvingBlockChainClient, + ChainNotify, PruningInfo, }; use client::Error as ClientError; use env_info::EnvInfo; @@ -580,13 +580,13 @@ impl Client { /// Attempt to get a copy of a specific block's final state. /// - /// This will not fail if given BlockID::Latest. + /// This will not fail if given BlockId::Latest. /// Otherwise, this can fail (but may not) if the DB prunes state. - pub fn state_at(&self, id: BlockID) -> Option { + pub fn state_at(&self, id: BlockId) -> Option { // fast path for latest state. match id.clone() { - BlockID::Pending => return self.miner.pending_state().or_else(|| Some(self.state())), - BlockID::Latest => return Some(self.state()), + BlockId::Pending => return self.miner.pending_state().or_else(|| Some(self.state())), + BlockId::Latest => return Some(self.state()), _ => {}, } @@ -611,15 +611,15 @@ impl Client { /// Attempt to get a copy of a specific block's beginning state. /// - /// This will not fail if given BlockID::Latest. + /// This will not fail if given BlockId::Latest. /// Otherwise, this can fail (but may not) if the DB prunes state. - pub fn state_at_beginning(&self, id: BlockID) -> Option { + pub fn state_at_beginning(&self, id: BlockId) -> Option { // fast path for latest state. match id { - BlockID::Pending => self.state_at(BlockID::Latest), + BlockId::Pending => self.state_at(BlockId::Latest), id => match self.block_number(id) { None | Some(0) => None, - Some(n) => self.state_at(BlockID::Number(n - 1)), + Some(n) => self.state_at(BlockId::Number(n - 1)), } } } @@ -689,18 +689,18 @@ impl Client { } /// Look up the block number for the given block ID. - pub fn block_number(&self, id: BlockID) -> Option { + pub fn block_number(&self, id: BlockId) -> Option { match id { - BlockID::Number(number) => Some(number), - BlockID::Hash(ref hash) => self.chain.read().block_number(hash), - BlockID::Earliest => Some(0), - BlockID::Latest | BlockID::Pending => Some(self.chain.read().best_block_number()), + BlockId::Number(number) => Some(number), + BlockId::Hash(ref hash) => self.chain.read().block_number(hash), + BlockId::Earliest => Some(0), + BlockId::Latest | BlockId::Pending => Some(self.chain.read().best_block_number()), } } /// Take a snapshot at the given block. /// If the ID given is "latest", this will default to 1000 blocks behind. 
- pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), EthcoreError> { + pub fn take_snapshot(&self, writer: W, at: BlockId, p: &snapshot::Progress) -> Result<(), EthcoreError> { let db = self.state_db.lock().journal_db().boxed_clone(); let best_block_number = self.chain_info().best_block_number; let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))); @@ -712,13 +712,13 @@ impl Client { let history = ::std::cmp::min(self.history, 1000); let start_hash = match at { - BlockID::Latest => { + BlockId::Latest => { let start_num = match db.earliest_era() { Some(era) => ::std::cmp::max(era, best_block_number - history), None => best_block_number - history, }; - match self.block_hash(BlockID::Number(start_num)) { + match self.block_hash(BlockId::Number(start_num)) { Some(h) => h, None => return Err(snapshot::Error::InvalidStartingBlock(at).into()), } @@ -739,19 +739,19 @@ impl Client { self.history } - fn block_hash(chain: &BlockChain, id: BlockID) -> Option { + fn block_hash(chain: &BlockChain, id: BlockId) -> Option { match id { - BlockID::Hash(hash) => Some(hash), - BlockID::Number(number) => chain.block_hash(number), - BlockID::Earliest => chain.block_hash(0), - BlockID::Latest | BlockID::Pending => Some(chain.best_block_hash()), + BlockId::Hash(hash) => Some(hash), + BlockId::Number(number) => chain.block_hash(number), + BlockId::Earliest => chain.block_hash(0), + BlockId::Latest | BlockId::Pending => Some(chain.best_block_hash()), } } - fn transaction_address(&self, id: TransactionID) -> Option { + fn transaction_address(&self, id: TransactionId) -> Option { match id { - TransactionID::Hash(ref hash) => self.chain.read().transaction_address(hash), - TransactionID::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { + TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash), + TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress { block_hash: hash, index: index, }) @@ -805,7 +805,7 @@ impl snapshot::DatabaseRestore for Client { impl BlockChainClient for Client { - fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result { + fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { let header = try!(self.block_header(block).ok_or(CallError::StatePruned)); let view = HeaderView::new(&header); let last_hashes = self.build_last_hashes(view.parent_hash()); @@ -841,11 +841,11 @@ impl BlockChainClient for Client { Ok(ret) } - fn replay(&self, id: TransactionID, analytics: CallAnalytics) -> Result { + fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { let address = try!(self.transaction_address(id).ok_or(CallError::TransactionNotFound)); - let header_data = try!(self.block_header(BlockID::Hash(address.block_hash)).ok_or(CallError::StatePruned)); - let body_data = try!(self.block_body(BlockID::Hash(address.block_hash)).ok_or(CallError::StatePruned)); - let mut state = try!(self.state_at_beginning(BlockID::Hash(address.block_hash)).ok_or(CallError::StatePruned)); + let header_data = try!(self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)); + let body_data = try!(self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)); + let mut state = try!(self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)); let txs = 
BodyView::new(&body_data).transactions(); if address.index >= txs.len() { @@ -918,18 +918,18 @@ impl BlockChainClient for Client { self.chain.read().best_block_header() } - fn block_header(&self, id: BlockID) -> Option { + fn block_header(&self, id: BlockId) -> Option { let chain = self.chain.read(); Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash)) } - fn block_body(&self, id: BlockID) -> Option { + fn block_body(&self, id: BlockId) -> Option { let chain = self.chain.read(); Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash)) } - fn block(&self, id: BlockID) -> Option { - if let BlockID::Pending = id { + fn block(&self, id: BlockId) -> Option { + if let BlockId::Pending = id { if let Some(block) = self.miner.pending_block() { return Some(block.rlp_bytes(Seal::Without)); } @@ -940,7 +940,7 @@ impl BlockChainClient for Client { }) } - fn block_status(&self, id: BlockID) -> BlockStatus { + fn block_status(&self, id: BlockId) -> BlockStatus { let chain = self.chain.read(); match Self::block_hash(&chain, id) { Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, @@ -949,42 +949,42 @@ impl BlockChainClient for Client { } } - fn block_total_difficulty(&self, id: BlockID) -> Option { - if let BlockID::Pending = id { + fn block_total_difficulty(&self, id: BlockId) -> Option { + if let BlockId::Pending = id { if let Some(block) = self.miner.pending_block() { - return Some(*block.header.difficulty() + self.block_total_difficulty(BlockID::Latest).expect("blocks in chain have details; qed")); + return Some(*block.header.difficulty() + self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed")); } } let chain = self.chain.read(); Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } - fn nonce(&self, address: &Address, id: BlockID) -> Option { + fn nonce(&self, address: &Address, id: BlockId) -> Option { self.state_at(id).map(|s| s.nonce(address)) } - fn storage_root(&self, address: &Address, id: BlockID) -> Option { + fn storage_root(&self, address: &Address, id: BlockId) -> Option { self.state_at(id).and_then(|s| s.storage_root(address)) } - fn block_hash(&self, id: BlockID) -> Option { + fn block_hash(&self, id: BlockId) -> Option { let chain = self.chain.read(); Self::block_hash(&chain, id) } - fn code(&self, address: &Address, id: BlockID) -> Option> { + fn code(&self, address: &Address, id: BlockId) -> Option> { self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone())) } - fn balance(&self, address: &Address, id: BlockID) -> Option { + fn balance(&self, address: &Address, id: BlockId) -> Option { self.state_at(id).map(|s| s.balance(address)) } - fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { + fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option { self.state_at(id).map(|s| s.storage_at(address, position)) } - fn list_accounts(&self, id: BlockID, after: Option<&Address>, count: u64) -> Option> { + fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option> { if !self.factories.trie.is_fat() { trace!(target: "fatdb", "list_accounts: Not a fat DB"); return None; @@ -1022,7 +1022,7 @@ impl BlockChainClient for Client { Some(accounts) } - fn list_storage(&self, id: BlockID, account: &Address, after: Option<&H256>, count: u64) -> Option> { + fn list_storage(&self, id: BlockId, account: &Address, after: Option<&H256>, count: u64) -> Option> { if !self.factories.trie.is_fat() { 
trace!(target: "fatdb", "list_stroage: Not a fat DB"); return None; @@ -1066,20 +1066,20 @@ impl BlockChainClient for Client { Some(keys) } - fn transaction(&self, id: TransactionID) -> Option { + fn transaction(&self, id: TransactionId) -> Option { self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) } - fn transaction_block(&self, id: TransactionID) -> Option { + fn transaction_block(&self, id: TransactionId) -> Option { self.transaction_address(id).map(|addr| addr.block_hash) } - fn uncle(&self, id: UncleID) -> Option { + fn uncle(&self, id: UncleId) -> Option { let index = id.position; self.block_body(id.block).and_then(|body| BodyView::new(&body).uncle_rlp_at(index)) } - fn transaction_receipt(&self, id: TransactionID) -> Option { + fn transaction_receipt(&self, id: TransactionId) -> Option { let chain = self.chain.read(); self.transaction_address(id) .and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| { @@ -1163,7 +1163,7 @@ impl BlockChainClient for Client { if self.chain.read().is_known(&unverified.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } - if self.block_status(BlockID::Hash(unverified.parent_hash())) == BlockStatus::Unknown { + if self.block_status(BlockId::Hash(unverified.parent_hash())) == BlockStatus::Unknown { return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash()))); } } @@ -1177,7 +1177,7 @@ impl BlockChainClient for Client { if self.chain.read().is_known(&header.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } - if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { + if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown { return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash()))); } } @@ -1200,7 +1200,7 @@ impl BlockChainClient for Client { self.engine.additional_params().into_iter().collect() } - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option> { match (self.block_number(from_block), self.block_number(to_block)) { (Some(from), Some(to)) => Some(self.chain.read().blocks_with_bloom(bloom, from, to)), _ => None @@ -1242,20 +1242,20 @@ impl BlockChainClient for Client { let trace_address = trace.address; self.transaction_address(trace.transaction) .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) + self.block_number(BlockId::Hash(tx_address.block_hash)) .and_then(|number| self.tracedb.read().trace(number, tx_address.index, trace_address)) }) } - fn transaction_traces(&self, transaction: TransactionID) -> Option> { + fn transaction_traces(&self, transaction: TransactionId) -> Option> { self.transaction_address(transaction) .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) + self.block_number(BlockId::Hash(tx_address.block_hash)) .and_then(|number| self.tracedb.read().transaction_traces(number, tx_address.index)) }) } - fn block_traces(&self, block: BlockID) -> Option> { + fn block_traces(&self, block: BlockId) -> Option> { self.block_number(block) .and_then(|number| self.tracedb.read().block_traces(number)) } @@ -1290,13 +1290,13 @@ impl BlockChainClient for Client { self.engine.signing_network_id(&self.latest_env_info()) } - fn block_extra_info(&self, id: BlockID) -> Option> { + fn block_extra_info(&self, id: BlockId) -> 
Option> { self.block_header(id) .map(|block| decode(&block)) .map(|header| self.engine.extra_info(&header)) } - fn uncle_extra_info(&self, id: UncleID) -> Option> { + fn uncle_extra_info(&self, id: UncleId) -> Option> { self.uncle(id) .map(|header| self.engine.extra_info(&decode(&header))) } @@ -1391,20 +1391,20 @@ impl MayPanic for Client { } } -impl ProvingBlockChainClient for Client { - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec { +impl ::client::ProvingBlockChainClient for Client { + fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec { self.state_at(id) .and_then(move |state| state.prove_storage(key1, key2, from_level).ok()) .unwrap_or_else(Vec::new) } - fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec { + fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec { self.state_at(id) .and_then(move |state| state.prove_account(key1, from_level).ok()) .unwrap_or_else(Vec::new) } - fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes { + fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes { self.state_at(id) .and_then(move |state| state.code_by_address_hash(account_key).ok()) .and_then(|x| x) diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index af4daece6..4e5554b01 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -27,7 +27,9 @@ pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChain pub use self::error::Error; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; pub use self::chain_notify::ChainNotify; -pub use self::traits::{BlockChainClient, MiningBlockChainClient, ProvingBlockChainClient}; +pub use self::traits::{BlockChainClient, MiningBlockChainClient}; + +pub use self::traits::ProvingBlockChainClient; pub use types::ids::*; pub use types::trace_filter::Filter as TraceFilter; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 433a5166f..2e08b22bf 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -24,8 +24,8 @@ use devtools::*; use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; use client::{ - BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID, - TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, + BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockId, + TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, }; use db::{NUM_COLUMNS, COL_STATE}; use header::{Header as BlockHeader, BlockNumber}; @@ -73,7 +73,7 @@ pub struct TestBlockChainClient { /// Execution result. pub execution_result: RwLock>>, /// Transaction receipts. - pub receipts: RwLock>, + pub receipts: RwLock>, /// Logs pub logs: RwLock>, /// Block queue size. @@ -92,8 +92,8 @@ pub struct TestBlockChainClient { pub first_block: RwLock>, } -#[derive(Clone)] /// Used for generating test client blocks. +#[derive(Clone)] pub enum EachBlockWith { /// Plain block. 
Nothing, @@ -158,7 +158,7 @@ impl TestBlockChainClient { } /// Set the transaction receipt result - pub fn set_transaction_receipt(&self, id: TransactionID, receipt: LocalizedReceipt) { + pub fn set_transaction_receipt(&self, id: TransactionId, receipt: LocalizedReceipt) { self.receipts.write().insert(id, receipt); } @@ -256,8 +256,8 @@ impl TestBlockChainClient { /// Make a bad block by setting invalid extra data. pub fn corrupt_block(&self, n: BlockNumber) { - let hash = self.block_hash(BlockID::Number(n)).unwrap(); - let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap()); + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); header.set_extra_data(b"This extra data is way too long to be considered valid".to_vec()); let mut rlp = RlpStream::new_list(3); rlp.append(&header); @@ -268,8 +268,8 @@ impl TestBlockChainClient { /// Make a bad block by setting invalid parent hash. pub fn corrupt_block_parent(&self, n: BlockNumber) { - let hash = self.block_hash(BlockID::Number(n)).unwrap(); - let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap()); + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); header.set_parent_hash(H256::from(42)); let mut rlp = RlpStream::new_list(3); rlp.append(&header); @@ -285,12 +285,12 @@ impl TestBlockChainClient { blocks_read[&index].clone() } - fn block_hash(&self, id: BlockID) -> Option { + fn block_hash(&self, id: BlockId) -> Option { match id { - BlockID::Hash(hash) => Some(hash), - BlockID::Number(n) => self.numbers.read().get(&(n as usize)).cloned(), - BlockID::Earliest => self.numbers.read().get(&0).cloned(), - BlockID::Latest | BlockID::Pending => self.numbers.read().get(&(self.numbers.read().len() - 1)).cloned() + BlockId::Hash(hash) => Some(hash), + BlockId::Number(n) => self.numbers.read().get(&(n as usize)).cloned(), + BlockId::Earliest => self.numbers.read().get(&0).cloned(), + BlockId::Latest | BlockId::Pending => self.numbers.read().get(&(self.numbers.read().len() - 1)).cloned() } } @@ -363,46 +363,46 @@ impl MiningBlockChainClient for TestBlockChainClient { } impl BlockChainClient for TestBlockChainClient { - fn call(&self, _t: &SignedTransaction, _block: BlockID, _analytics: CallAnalytics) -> Result { + fn call(&self, _t: &SignedTransaction, _block: BlockId, _analytics: CallAnalytics) -> Result { self.execution_result.read().clone().unwrap() } - fn replay(&self, _id: TransactionID, _analytics: CallAnalytics) -> Result { + fn replay(&self, _id: TransactionId, _analytics: CallAnalytics) -> Result { self.execution_result.read().clone().unwrap() } - fn block_total_difficulty(&self, _id: BlockID) -> Option { + fn block_total_difficulty(&self, _id: BlockId) -> Option { Some(U256::zero()) } - fn block_hash(&self, id: BlockID) -> Option { + fn block_hash(&self, id: BlockId) -> Option { Self::block_hash(self, id) } - fn nonce(&self, address: &Address, id: BlockID) -> Option { + fn nonce(&self, address: &Address, id: BlockId) -> Option { match id { - BlockID::Latest => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params.account_start_nonce)), + BlockId::Latest => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params.account_start_nonce)), _ => None, } } - fn storage_root(&self, _address: &Address, _id: BlockID) -> Option { + fn storage_root(&self, _address: &Address, _id: 
BlockId) -> Option { None } fn latest_nonce(&self, address: &Address) -> U256 { - self.nonce(address, BlockID::Latest).unwrap() + self.nonce(address, BlockId::Latest).unwrap() } - fn code(&self, address: &Address, id: BlockID) -> Option> { + fn code(&self, address: &Address, id: BlockId) -> Option> { match id { - BlockID::Latest => Some(self.code.read().get(address).cloned()), + BlockId::Latest => Some(self.code.read().get(address).cloned()), _ => None, } } - fn balance(&self, address: &Address, id: BlockID) -> Option { - if let BlockID::Latest = id { + fn balance(&self, address: &Address, id: BlockId) -> Option { + if let BlockId::Latest = id { Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)) } else { None @@ -410,45 +410,45 @@ impl BlockChainClient for TestBlockChainClient { } fn latest_balance(&self, address: &Address) -> U256 { - self.balance(address, BlockID::Latest).unwrap() + self.balance(address, BlockId::Latest).unwrap() } - fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { - if let BlockID::Latest = id { + fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option { + if let BlockId::Latest = id { Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new)) } else { None } } - fn list_accounts(&self, _id: BlockID, _after: Option<&Address>, _count: u64) -> Option> { + fn list_accounts(&self, _id: BlockId, _after: Option<&Address>, _count: u64) -> Option> { None } - fn list_storage(&self, _id: BlockID, _account: &Address, _after: Option<&H256>, _count: u64) -> Option> { + fn list_storage(&self, _id: BlockId, _account: &Address, _after: Option<&H256>, _count: u64) -> Option> { None } - fn transaction(&self, _id: TransactionID) -> Option { + fn transaction(&self, _id: TransactionId) -> Option { None // Simple default. } - fn transaction_block(&self, _id: TransactionID) -> Option { + fn transaction_block(&self, _id: TransactionId) -> Option { None // Simple default. } - fn uncle(&self, _id: UncleID) -> Option { + fn uncle(&self, _id: UncleId) -> Option { None // Simple default. 
} - fn uncle_extra_info(&self, _id: UncleID) -> Option> { + fn uncle_extra_info(&self, _id: UncleId) -> Option> { None } - fn transaction_receipt(&self, id: TransactionID) -> Option { + fn transaction_receipt(&self, id: TransactionId) -> Option { self.receipts.read().get(&id).cloned() } - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockID, _to_block: BlockID) -> Option> { + fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { unimplemented!(); } @@ -466,14 +466,14 @@ impl BlockChainClient for TestBlockChainClient { } fn best_block_header(&self) -> Bytes { - self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).expect("Best block always have header.") + self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).expect("Best block always have header.") } - fn block_header(&self, id: BlockID) -> Option { + fn block_header(&self, id: BlockId) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) } - fn block_body(&self, id: BlockID) -> Option { + fn block_body(&self, id: BlockId) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| { let mut stream = RlpStream::new_list(2); stream.append_raw(Rlp::new(r).at(1).as_raw(), 1); @@ -482,21 +482,21 @@ impl BlockChainClient for TestBlockChainClient { })) } - fn block(&self, id: BlockID) -> Option { + fn block(&self, id: BlockId) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).cloned()) } - fn block_extra_info(&self, id: BlockID) -> Option> { + fn block_extra_info(&self, id: BlockId) -> Option> { self.block(id) .map(|block| BlockView::new(&block).header()) .map(|header| self.spec.engine.extra_info(&header)) } - fn block_status(&self, id: BlockID) -> BlockStatus { + fn block_status(&self, id: BlockId) -> BlockStatus { match id { - BlockID::Number(number) if (number as usize) < self.blocks.read().len() => BlockStatus::InChain, - BlockID::Hash(ref hash) if self.blocks.read().get(hash).is_some() => BlockStatus::InChain, + BlockId::Number(number) if (number as usize) < self.blocks.read().len() => BlockStatus::InChain, + BlockId::Hash(ref hash) if self.blocks.read().get(hash).is_some() => BlockStatus::InChain, _ => BlockStatus::Unknown } } @@ -649,11 +649,11 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn transaction_traces(&self, _trace: TransactionID) -> Option> { + fn transaction_traces(&self, _trace: TransactionId) -> Option> { unimplemented!(); } - fn block_traces(&self, _trace: BlockID) -> Option> { + fn block_traces(&self, _trace: BlockId) -> Option> { unimplemented!(); } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index e23a564d4..ad21d2ba5 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -50,93 +50,93 @@ pub trait BlockChainClient : Sync + Send { fn keep_alive(&self) {} /// Get raw block header data by block id. - fn block_header(&self, id: BlockID) -> Option; + fn block_header(&self, id: BlockId) -> Option; /// Get raw block body data by block id. /// Block body is an RLP list of two items: uncles and transactions. - fn block_body(&self, id: BlockID) -> Option; + fn block_body(&self, id: BlockId) -> Option; /// Get raw block data by block header hash. - fn block(&self, id: BlockID) -> Option; + fn block(&self, id: BlockId) -> Option; /// Get block status by block header hash. 
- fn block_status(&self, id: BlockID) -> BlockStatus; + fn block_status(&self, id: BlockId) -> BlockStatus; /// Get block total difficulty. - fn block_total_difficulty(&self, id: BlockID) -> Option; + fn block_total_difficulty(&self, id: BlockId) -> Option; /// Attempt to get address nonce at given block. - /// May not fail on BlockID::Latest. - fn nonce(&self, address: &Address, id: BlockID) -> Option; + /// May not fail on BlockId::Latest. + fn nonce(&self, address: &Address, id: BlockId) -> Option; /// Attempt to get address storage root at given block. - /// May not fail on BlockID::Latest. - fn storage_root(&self, address: &Address, id: BlockID) -> Option; + /// May not fail on BlockId::Latest. + fn storage_root(&self, address: &Address, id: BlockId) -> Option; /// Get address nonce at the latest block's state. fn latest_nonce(&self, address: &Address) -> U256 { - self.nonce(address, BlockID::Latest) - .expect("nonce will return Some when given BlockID::Latest. nonce was given BlockID::Latest. \ + self.nonce(address, BlockId::Latest) + .expect("nonce will return Some when given BlockId::Latest. nonce was given BlockId::Latest. \ Therefore nonce has returned Some; qed") } /// Get block hash. - fn block_hash(&self, id: BlockID) -> Option; + fn block_hash(&self, id: BlockId) -> Option; /// Get address code at given block's state. - fn code(&self, address: &Address, id: BlockID) -> Option>; + fn code(&self, address: &Address, id: BlockId) -> Option>; /// Get address code at the latest block's state. fn latest_code(&self, address: &Address) -> Option { - self.code(address, BlockID::Latest) - .expect("code will return Some if given BlockID::Latest; qed") + self.code(address, BlockId::Latest) + .expect("code will return Some if given BlockId::Latest; qed") } /// Get address balance at the given block's state. /// - /// May not return None if given BlockID::Latest. + /// May not return None if given BlockId::Latest. /// Returns None if and only if the block's root hash has been pruned from the DB. - fn balance(&self, address: &Address, id: BlockID) -> Option; + fn balance(&self, address: &Address, id: BlockId) -> Option; /// Get address balance at the latest block's state. fn latest_balance(&self, address: &Address) -> U256 { - self.balance(address, BlockID::Latest) - .expect("balance will return Some if given BlockID::Latest. balance was given BlockID::Latest \ + self.balance(address, BlockId::Latest) + .expect("balance will return Some if given BlockId::Latest. balance was given BlockId::Latest \ Therefore balance has returned Some; qed") } /// Get value of the storage at given position at the given block's state. /// - /// May not return None if given BlockID::Latest. + /// May not return None if given BlockId::Latest. /// Returns None if and only if the block's root hash has been pruned from the DB. - fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option; + fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option; /// Get value of the storage at given position at the latest block's state. fn latest_storage_at(&self, address: &Address, position: &H256) -> H256 { - self.storage_at(address, position, BlockID::Latest) - .expect("storage_at will return Some if given BlockID::Latest. storage_at was given BlockID::Latest. \ + self.storage_at(address, position, BlockId::Latest) + .expect("storage_at will return Some if given BlockId::Latest. storage_at was given BlockId::Latest. 
\ Therefore storage_at has returned Some; qed") } /// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`. /// If `after` is set the list starts with the following item. - fn list_accounts(&self, id: BlockID, after: Option<&Address>, count: u64) -> Option>; + fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option>; /// Get a list of all storage keys in the block `id`, if fat DB is in operation, otherwise `None`. /// If `after` is set the list starts with the following item. - fn list_storage(&self, id: BlockID, account: &Address, after: Option<&H256>, count: u64) -> Option>; + fn list_storage(&self, id: BlockId, account: &Address, after: Option<&H256>, count: u64) -> Option>; /// Get transaction with given hash. - fn transaction(&self, id: TransactionID) -> Option; + fn transaction(&self, id: TransactionId) -> Option; /// Get the hash of block that contains the transaction, if any. - fn transaction_block(&self, id: TransactionID) -> Option; + fn transaction_block(&self, id: TransactionId) -> Option; /// Get uncle with given id. - fn uncle(&self, id: UncleID) -> Option; + fn uncle(&self, id: UncleId) -> Option; /// Get transaction receipt with given hash. - fn transaction_receipt(&self, id: TransactionID) -> Option; + fn transaction_receipt(&self, id: TransactionId) -> Option; /// Get a tree route between `from` and `to`. /// See `BlockChain::tree_route`. @@ -173,16 +173,16 @@ pub trait BlockChainClient : Sync + Send { fn best_block_header(&self) -> Bytes; /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option>; + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; /// Makes a non-persistent transaction call. - fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result; + fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result; /// Replays a given transaction for inspection. - fn replay(&self, t: TransactionID, analytics: CallAnalytics) -> Result; + fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result; /// Returns traces matching given filter. fn filter_traces(&self, filter: TraceFilter) -> Option>; @@ -191,10 +191,10 @@ pub trait BlockChainClient : Sync + Send { fn trace(&self, trace: TraceId) -> Option; /// Returns traces created by transaction. - fn transaction_traces(&self, trace: TransactionID) -> Option>; + fn transaction_traces(&self, trace: TransactionId) -> Option>; /// Returns traces created by transaction from block. - fn block_traces(&self, trace: BlockID) -> Option>; + fn block_traces(&self, trace: BlockId) -> Option>; /// Get last hashes starting from best block. fn last_hashes(&self) -> LastHashes; @@ -211,7 +211,7 @@ pub trait BlockChainClient : Sync + Send { let mut corpus = Vec::new(); while corpus.is_empty() { for _ in 0..sample_size { - let block_bytes = self.block(BlockID::Hash(h)).expect("h is either the best_block_hash or an ancestor; qed"); + let block_bytes = self.block(BlockId::Hash(h)).expect("h is either the best_block_hash or an ancestor; qed"); let block = BlockView::new(&block_bytes); let header = block.header_view(); if header.number() == 0 { @@ -249,11 +249,11 @@ pub trait BlockChainClient : Sync + Send { /// Set the mode. 
fn set_mode(&self, mode: Mode); - /// Returns engine-related extra info for `BlockID`. - fn block_extra_info(&self, id: BlockID) -> Option>; + /// Returns engine-related extra info for `BlockId`. + fn block_extra_info(&self, id: BlockId) -> Option>; - /// Returns engine-related extra info for `UncleID`. - fn uncle_extra_info(&self, id: UncleID) -> Option>; + /// Returns engine-related extra info for `UncleId`. + fn uncle_extra_info(&self, id: UncleId) -> Option>; /// Returns information about pruning/data availability. fn pruning_info(&self) -> PruningInfo; @@ -288,15 +288,15 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. /// Nodes after `from_level` may be omitted. /// An empty vector indicates unservable query. - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec; + fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec; /// Prove account existence at a specific block id. /// The key is the keccak hash of the account's address. /// Returns a vector of raw trie nodes (in order from the root) proving the query. /// Nodes after `from_level` may be omitted. /// An empty vector indicates unservable query. - fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec; + fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec; /// Get code by address hash. - fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes; + fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; } \ No newline at end of file diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 21a6e4761..98190c1ea 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -347,7 +347,6 @@ mod tests { use tests::helpers::*; use account_provider::AccountProvider; use spec::Spec; - use std::time::UNIX_EPOCH; #[test] fn has_valid_metadata() { @@ -442,13 +441,30 @@ mod tests { let engine = Spec::new_test_round().engine; let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); - let time = UNIX_EPOCH.elapsed().unwrap().as_secs(); // Two authorities. - let mut step = time - time % 2; - header.set_seal(vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); + // Spec starts with step 2. + header.set_seal(vec![encode(&2usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); assert!(engine.verify_block_seal(&header).is_err()); - step = step + 1; - header.set_seal(vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); + header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); assert!(engine.verify_block_seal(&header).is_ok()); } + + #[test] + fn rejects_future_block() { + let mut header: Header = Header::default(); + let tap = AccountProvider::transient_provider(); + let addr = tap.insert_account("0".sha3(), "0").unwrap(); + + header.set_author(addr); + + let engine = Spec::new_test_round().engine; + + let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap(); + // Two authorities. + // Spec starts with step 2. 
+ header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); + assert!(engine.verify_block_seal(&header).is_ok()); + header.set_seal(vec![encode(&5usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); + assert!(engine.verify_block_seal(&header).is_err()); + } } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index b171856b9..83a96837f 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -23,7 +23,7 @@ use account_provider::{AccountProvider, Error as AccountError}; use views::{BlockView, HeaderView}; use header::Header; use state::{State, CleanupMode}; -use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics, TransactionID}; +use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockId, CallAnalytics, TransactionId}; use client::TransactionImportResult; use executive::contract_address; use block::{ClosedBlock, SealedBlock, IsBlock, Block}; @@ -585,7 +585,7 @@ impl Miner { let best_block_header: Header = ::rlp::decode(&chain.best_block_header()); transactions.into_iter() .map(|tx| { - if chain.transaction_block(TransactionID::Hash(tx.hash())).is_some() { + if chain.transaction_block(TransactionId::Hash(tx.hash())).is_some() { debug!(target: "miner", "Rejected tx {:?}: already in the blockchain", tx.hash()); return Err(Error::Transaction(TransactionError::AlreadyImported)); } @@ -701,7 +701,7 @@ impl MinerService for Miner { Ok(ret) }, None => { - chain.call(t, BlockID::Latest, analytics) + chain.call(t, BlockId::Latest, analytics) } } } @@ -1092,20 +1092,6 @@ impl MinerService for Miner { fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) { trace!(target: "miner", "chain_new_blocks"); - fn fetch_transactions(chain: &MiningBlockChainClient, hash: &H256) -> Vec { - let block = chain - .block(BlockID::Hash(*hash)) - // Client should send message after commit to db and inserting to chain. - .expect("Expected in-chain blocks."); - let block = BlockView::new(&block); - let txs = block.transactions(); - // populate sender - for tx in &txs { - let _sender = tx.sender(); - } - txs - } - // 1. We ignore blocks that were `imported` (because it means that they are not in canon-chain, and transactions // should be still available in the queue. // 2. We ignore blocks that are `invalid` because it doesn't have any meaning in terms of the transactions that @@ -1116,35 +1102,29 @@ impl MinerService for Miner { // Then import all transactions... 
{ - let out_of_chain = retracted - .par_iter() - .map(|h| fetch_transactions(chain, h)); - out_of_chain.for_each(|txs| { - let mut transaction_queue = self.transaction_queue.lock(); - let _ = self.add_transactions_to_queue( - chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue - ); - }); + retracted.par_iter() + .map(|hash| { + let block = chain.block(BlockId::Hash(*hash)) + .expect("Client is sending message after commit to db and inserting to chain; the block is available; qed"); + let block = BlockView::new(&block); + let txs = block.transactions(); + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + txs + }).for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock(); + let _ = self.add_transactions_to_queue( + chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue + ); + }); } - // ...and at the end remove old ones + // ...and at the end remove the old ones { - let in_chain = enacted - .par_iter() - .map(|h: &H256| fetch_transactions(chain, h)); - - in_chain.for_each(|mut txs| { - let mut transaction_queue = self.transaction_queue.lock(); - - let to_remove = txs.drain(..) - .map(|tx| { - tx.sender().expect("Transaction is in block, so sender has to be defined.") - }) - .collect::>(); - for sender in to_remove { - transaction_queue.remove_all(sender, chain.latest_nonce(&sender)); - } - }); + let mut transaction_queue = self.transaction_queue.lock(); + transaction_queue.remove_old(|sender| chain.latest_nonce(sender)); } if enacted.len() > 0 { diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index cd2d3ba47..b4cc93a9c 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -79,8 +79,10 @@ //! we check if the transactions should go to `current` (comparing state nonce) //! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. //! 3. `remove_all` is used to inform the queue about client (state) nonce changes. -//! - It removes all transactions (either from `current` or `future`) with nonce < client nonce -//! - It moves matching `future` transactions to `current` +//! - It removes all transactions (either from `current` or `future`) with nonce < client nonce +//! - It moves matching `future` transactions to `current` +//! 4. `remove_old` is used as convenient method to update the state nonce for all senders in the queue. +//! - Invokes `remove_all` with latest state nonce for all senders. use std::ops::Deref; use std::cmp::Ordering; @@ -752,6 +754,26 @@ impl TransactionQueue { /// Removes all transactions from particular sender up to (excluding) given client (state) nonce. /// Client (State) Nonce = next valid nonce for this sender. pub fn remove_all(&mut self, sender: Address, client_nonce: U256) { + // Check if there is anything in current... + let should_check_in_current = self.current.by_address.row(&sender) + // If nonce == client_nonce nothing is changed + .and_then(|by_nonce| by_nonce.keys().find(|nonce| *nonce < &client_nonce)) + .map(|_| ()); + // ... 
or future + let should_check_in_future = self.future.by_address.row(&sender) + // if nonce == client_nonce we need to promote to current + .and_then(|by_nonce| by_nonce.keys().find(|nonce| *nonce <= &client_nonce)) + .map(|_| ()); + + if should_check_in_current.or(should_check_in_future).is_none() { + return; + } + + self.remove_all_internal(sender, client_nonce); + } + + /// Always updates future and moves transactions from current to future. + fn remove_all_internal(&mut self, sender: Address, client_nonce: U256) { // We will either move transaction to future or remove it completely // so there will be no transactions from this sender in current self.last_nonces.remove(&sender); @@ -765,6 +787,20 @@ impl TransactionQueue { assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len()); } + /// Checks the current nonce for all transactions' senders in the queue and removes the old transactions. + pub fn remove_old(&mut self, fetch_nonce: F) where + F: Fn(&Address) -> U256, + { + let senders = self.current.by_address.keys() + .chain(self.future.by_address.keys()) + .cloned() + .collect::>(); + + for sender in senders { + self.remove_all(sender, fetch_nonce(&sender)); + } + } + /// Penalize transactions from sender of transaction with given hash. /// I.e. it should change the priority of the transaction in the queue. /// @@ -847,7 +883,7 @@ impl TransactionQueue { if order.is_some() { // This will keep consistency in queue // Moves all to future and then promotes a batch from current: - self.remove_all(sender, current_nonce); + self.remove_all_internal(sender, current_nonce); assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len()); return; } @@ -2438,7 +2474,7 @@ mod test { } #[test] - fn should_reject_transactions_below_bas_gas() { + fn should_reject_transactions_below_base_gas() { // given let mut txq = TransactionQueue::default(); let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); @@ -2457,4 +2493,26 @@ mod test { } + #[test] + fn should_clear_all_old_transactions() { + // given + let mut txq = TransactionQueue::default(); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); + let (tx3, tx4) = new_tx_pair_default(1.into(), 0.into()); + let nonce1 = tx1.nonce; + + // Insert all transactions + txq.add(tx1, TransactionOrigin::External, &default_account_details, &gas_estimator).unwrap(); + txq.add(tx2, TransactionOrigin::External, &default_account_details, &gas_estimator).unwrap(); + txq.add(tx3, TransactionOrigin::External, &default_account_details, &gas_estimator).unwrap(); + txq.add(tx4, TransactionOrigin::External, &default_account_details, &gas_estimator).unwrap(); + assert_eq!(txq.top_transactions().len(), 4); + + // when + txq.remove_old(|_| nonce1 + U256::one()); + + // then + assert_eq!(txq.top_transactions().len(), 2); + } + } diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index d417695f0..cc84d8e48 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -18,7 +18,7 @@ use std::fmt; -use ids::BlockID; +use ids::BlockId; use util::H256; use util::trie::TrieError; @@ -28,7 +28,7 @@ use rlp::DecoderError; #[derive(Debug)] pub enum Error { /// Invalid starting block for snapshot. - InvalidStartingBlock(BlockID), + InvalidStartingBlock(BlockId), /// Block not found. BlockNotFound(H256), /// Incomplete chain. 
diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 408941309..9b45f2687 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -27,7 +27,7 @@ use account_db::{AccountDB, AccountDBMut}; use blockchain::{BlockChain, BlockProvider}; use engines::Engine; use header::Header; -use ids::BlockID; +use ids::BlockId; use views::BlockView; use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint}; @@ -129,7 +129,7 @@ pub fn take_snapshot( p: &Progress ) -> Result<(), Error> { let start_header = try!(chain.block_header(&block_at) - .ok_or(Error::InvalidStartingBlock(BlockID::Hash(block_at)))); + .ok_or(Error::InvalidStartingBlock(BlockId::Hash(block_at)))); let state_root = start_header.state_root(); let number = start_header.number(); diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 89ee68de0..05c4c1f9f 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -30,7 +30,7 @@ use blockchain::BlockChain; use client::{BlockChainClient, Client}; use engines::Engine; use error::Error; -use ids::BlockID; +use ids::BlockId; use service::ClientIoMessage; use io::IoChannel; @@ -354,7 +354,7 @@ impl Service { let writer = try!(LooseWriter::new(temp_dir.clone())); let guard = Guard::new(temp_dir.clone()); - let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress); + let res = client.take_snapshot(writer, BlockId::Number(num), &self.progress); self.taking_snapshot.store(false, Ordering::SeqCst); if let Err(e) = res { diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs index e136985c6..efdb12323 100644 --- a/ethcore/src/snapshot/tests/service.rs +++ b/ethcore/src/snapshot/tests/service.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use client::{BlockChainClient, Client}; -use ids::BlockID; +use ids::BlockId; use snapshot::service::{Service, ServiceParams}; use snapshot::{self, ManifestData, SnapshotService}; use spec::Spec; @@ -96,8 +96,8 @@ fn restored_is_equivalent() { assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive); for x in 0..NUM_BLOCKS { - let block1 = client.block(BlockID::Number(x as u64)).unwrap(); - let block2 = client2.block(BlockID::Number(x as u64)).unwrap(); + let block1 = client.block(BlockId::Number(x as u64)).unwrap(); + let block2 = client2.block(BlockId::Number(x as u64)).unwrap(); assert_eq!(block1, block2); } diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 43439e437..ab4dde134 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -18,7 +18,7 @@ use util::Mutex; use client::{BlockChainClient, Client, ChainNotify}; -use ids::BlockID; +use ids::BlockId; use service::ClientIoMessage; use views::HeaderView; @@ -43,7 +43,7 @@ impl Oracle for StandardOracle where F: Send + Sync + Fn() -> bool { fn to_number(&self, hash: H256) -> Option { - self.client.block_header(BlockID::Hash(hash)).map(|h| HeaderView::new(&h).number()) + self.client.block_header(BlockId::Hash(hash)).map(|h| HeaderView::new(&h).number()) } fn is_major_importing(&self) -> bool { diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index b3d63d0ae..8a52b62ff 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use transaction::SignedTransaction; use state_db::StateDB; use util::*; + use util::trie::recorder::{Recorder, BasicRecorder as TrieRecorder}; mod account; diff --git a/ethcore/src/tests/client.rs 
b/ethcore/src/tests/client.rs index 427082823..0b688345d 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use io::IoChannel; -use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockID}; +use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId}; use state::CleanupMode; use ethereum; use block::IsBlock; @@ -99,7 +99,7 @@ fn imports_good_block() { client.flush_queue(); client.import_verified_blocks(); - let block = client.block_header(BlockID::Number(1)).unwrap(); + let block = client.block_header(BlockId::Number(1)).unwrap(); assert!(!block.is_empty()); } @@ -117,7 +117,7 @@ fn query_none_block() { IoChannel::disconnected(), &db_config ).unwrap(); - let non_existant = client.block_header(BlockID::Number(188)); + let non_existant = client.block_header(BlockId::Number(188)); assert!(non_existant.is_none()); } @@ -125,7 +125,7 @@ fn query_none_block() { fn query_bad_block() { let client_result = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]); let client = client_result.reference(); - let bad_block:Option = client.block_header(BlockID::Number(1)); + let bad_block:Option = client.block_header(BlockId::Number(1)); assert!(bad_block.is_none()); } @@ -146,8 +146,8 @@ fn returns_logs() { let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = client_result.reference(); let logs = client.logs(Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: None, topics: vec![], limit: None, @@ -161,8 +161,8 @@ fn returns_logs_with_limit() { let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = client_result.reference(); let logs = client.logs(Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: None, topics: vec![], limit: Some(2), @@ -176,7 +176,7 @@ fn returns_block_body() { let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = client_result.reference(); let block = BlockView::new(&dummy_block); - let body = client.block_body(BlockID::Hash(block.header().hash())).unwrap(); + let body = client.block_body(BlockId::Hash(block.header().hash())).unwrap(); let body = Rlp::new(&body); assert_eq!(body.item_count(), 2); assert_eq!(body.at(0).as_raw()[..], block.rlp().at(1).as_raw()[..]); @@ -187,7 +187,7 @@ fn returns_block_body() { fn imports_block_sequence() { let client_result = generate_dummy_client(6); let client = client_result.reference(); - let block = client.block_header(BlockID::Number(5)).unwrap(); + let block = client.block_header(BlockId::Number(5)).unwrap(); assert!(!block.is_empty()); } diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index b021e750d..2da94b3d0 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -19,7 +19,7 @@ use nanoipc; use std::sync::Arc; use std::sync::atomic::{Ordering, AtomicBool}; -use client::{Client, BlockChainClient, ClientConfig, BlockID}; +use client::{Client, BlockChainClient, ClientConfig, BlockId}; use client::remote::RemoteClient; use tests::helpers::*; use devtools::*; @@ -71,7 +71,7 @@ fn can_query_block() { run_test_worker(scope, stop_guard.share(), socket_path); let remote_client = nanoipc::generic_client::>(socket_path).unwrap(); - let non_existant_block = remote_client.block_header(BlockID::Number(999)); + 
let non_existant_block = remote_client.block_header(BlockId::Number(999)); assert!(non_existant_block.is_none()); }) diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 14129d4d9..174c9b386 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -94,7 +94,7 @@ impl Key for TraceGroupPosition { } #[derive(Debug, Hash, Eq, PartialEq)] -enum CacheID { +enum CacheId { Trace(H256), Bloom(TraceGroupPosition), } @@ -104,7 +104,7 @@ pub struct TraceDB where T: DatabaseExtras { // cache traces: RwLock>, blooms: RwLock>, - cache_manager: RwLock>, + cache_manager: RwLock>, // db tracesdb: Arc, // config, @@ -119,7 +119,7 @@ impl BloomGroupDatabase for TraceDB where T: DatabaseExtras { fn blooms_at(&self, position: &GroupPosition) -> Option { let position = TraceGroupPosition::from(position.clone()); let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, &position).map(Into::into); - self.note_used(CacheID::Bloom(position)); + self.note_used(CacheId::Bloom(position)); result } } @@ -152,7 +152,7 @@ impl TraceDB where T: DatabaseExtras { } /// Let the cache system know that a cacheable item has been used. - fn note_used(&self, id: CacheID) { + fn note_used(&self, id: CacheId) { let mut cache_manager = self.cache_manager.write(); cache_manager.note_used(id); } @@ -168,8 +168,8 @@ impl TraceDB where T: DatabaseExtras { cache_manager.collect_garbage(current_size, | ids | { for id in &ids { match *id { - CacheID::Trace(ref h) => { traces.remove(h); }, - CacheID::Bloom(ref h) => { blooms.remove(h); }, + CacheId::Trace(ref h) => { traces.remove(h); }, + CacheId::Bloom(ref h) => { blooms.remove(h); }, } } traces.shrink_to_fit(); @@ -182,7 +182,7 @@ impl TraceDB where T: DatabaseExtras { /// Returns traces for block with hash. fn traces(&self, block_hash: &H256) -> Option { let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.traces, block_hash); - self.note_used(CacheID::Trace(block_hash.clone())); + self.note_used(CacheId::Trace(block_hash.clone())); result } @@ -289,7 +289,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove); // note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection for key in blooms_keys { - self.note_used(CacheID::Bloom(key)); + self.note_used(CacheId::Bloom(key)); } } @@ -300,7 +300,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { // cause this value might be queried by hash later batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection - self.note_used(CacheID::Trace(request.block_hash.clone())); + self.note_used(CacheId::Trace(request.block_hash.clone())); } } diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index e3487e5f6..ecc997f71 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -18,17 +18,17 @@ use util::{Address, H256, Hashable, H2048}; use util::bloom::Bloomable; -use client::BlockID; +use client::BlockId; use log_entry::LogEntry; /// Blockchain Filter. #[derive(Binary, Debug, PartialEq)] pub struct Filter { /// Blockchain will be searched from this block. - pub from_block: BlockID, + pub from_block: BlockId, /// Till this block. - pub to_block: BlockID, + pub to_block: BlockId, /// Search addresses. 
/// @@ -114,14 +114,14 @@ impl Filter { mod tests { use util::FixedHash; use filter::Filter; - use client::BlockID; + use client::BlockId; use log_entry::LogEntry; #[test] fn test_bloom_possibilities_none() { let none_filter = Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: None, topics: vec![None, None, None, None], limit: None, @@ -136,8 +136,8 @@ mod tests { #[test] fn test_bloom_possibilities_single_address_and_topic() { let filter = Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), topics: vec![ Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), @@ -155,8 +155,8 @@ mod tests { #[test] fn test_bloom_possibilities_single_address_and_many_topics() { let filter = Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), topics: vec![ Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), @@ -174,8 +174,8 @@ mod tests { #[test] fn test_bloom_possibilites_multiple_addresses_and_topics() { let filter = Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: Some(vec![ "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), "b372018f3be9e171df0581136b59d2faf73a7d5d".into(), @@ -204,8 +204,8 @@ mod tests { #[test] fn test_filter_matches() { let filter = Filter { - from_block: BlockID::Earliest, - to_block: BlockID::Latest, + from_block: BlockId::Earliest, + to_block: BlockId::Latest, address: Some(vec!["b372018f3be9e171df0581136b59d2faf73a7d5d".into()]), topics: vec![ Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), diff --git a/ethcore/src/types/ids.rs b/ethcore/src/types/ids.rs index 1fe81f392..2c3c777b4 100644 --- a/ethcore/src/types/ids.rs +++ b/ethcore/src/types/ids.rs @@ -21,7 +21,7 @@ use header::BlockNumber; /// Uniquely identifies block. #[derive(Debug, PartialEq, Copy, Clone, Hash, Eq, Binary)] -pub enum BlockID { +pub enum BlockId { /// Block's sha3. /// Querying by hash is always faster. Hash(H256), @@ -37,28 +37,28 @@ pub enum BlockID { /// Uniquely identifies transaction. #[derive(Debug, PartialEq, Clone, Hash, Eq, Binary)] -pub enum TransactionID { +pub enum TransactionId { /// Transaction's sha3. Hash(H256), /// Block id and transaction index within this block. /// Querying by block position is always faster. - Location(BlockID, usize) + Location(BlockId, usize) } /// Uniquely identifies Trace. #[derive(Binary)] pub struct TraceId { /// Transaction - pub transaction: TransactionID, + pub transaction: TransactionId, /// Trace address within transaction. pub address: Vec, } /// Uniquely identifies Uncle. #[derive(Debug, PartialEq, Eq, Copy, Clone, Binary)] -pub struct UncleID { +pub struct UncleId { /// Block id. - pub block: BlockID, + pub block: BlockId, /// Position in block. 
pub position: usize } diff --git a/ethcore/src/types/trace_filter.rs b/ethcore/src/types/trace_filter.rs index c17cc9e85..af9d7c3ee 100644 --- a/ethcore/src/types/trace_filter.rs +++ b/ethcore/src/types/trace_filter.rs @@ -18,13 +18,13 @@ use std::ops::Range; use util::{Address}; -use types::ids::BlockID; +use types::ids::BlockId; /// Easy to use trace filter. #[derive(Binary)] pub struct Filter { /// Range of filtering. - pub range: Range, + pub range: Range, /// From address. pub from_address: Vec
, /// To address. diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index 56b2c1ccb..c86123d1a 100644 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -20,7 +20,7 @@ use std::collections::HashMap; use time; use ethkey::Address; use {json, SafeAccount, Error}; -use json::UUID; +use json::Uuid; use super::KeyDirectory; const IGNORED_FILES: &'static [&'static str] = &["thumbs.db", "address_book.json"]; @@ -113,7 +113,7 @@ impl KeyDirectory for DiskDirectory { // build file path let filename = account.filename.as_ref().cloned().unwrap_or_else(|| { let timestamp = time::strftime("%Y-%m-%dT%H-%M-%S", &time::now_utc()).expect("Time-format string is valid."); - format!("UTC--{}Z--{}", timestamp, UUID::from(account.id)) + format!("UTC--{}Z--{}", timestamp, Uuid::from(account.id)) }); // update account filename diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index 4991c4714..3747431fb 100644 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -24,7 +24,7 @@ use dir::KeyDirectory; use account::SafeAccount; use {Error, SecretStore}; use json; -use json::UUID; +use json::Uuid; use parking_lot::RwLock; use presale::PresaleWallet; use import; @@ -154,7 +154,7 @@ impl SecretStore for EthStore { account.public(password) } - fn uuid(&self, address: &Address) -> Result { + fn uuid(&self, address: &Address) -> Result { let account = try!(self.get(address)); Ok(account.id.into()) } diff --git a/ethstore/src/json/error.rs b/ethstore/src/json/error.rs index 5e077cfad..a3ea4d326 100644 --- a/ethstore/src/json/error.rs +++ b/ethstore/src/json/error.rs @@ -21,7 +21,7 @@ pub enum Error { UnsupportedCipher, InvalidCipherParams, UnsupportedKdf, - InvalidUUID, + InvalidUuid, UnsupportedVersion, InvalidCiphertext, InvalidH256, @@ -31,7 +31,7 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Error::InvalidUUID => write!(f, "Invalid UUID"), + Error::InvalidUuid => write!(f, "Invalid Uuid"), Error::UnsupportedVersion => write!(f, "Unsupported version"), Error::UnsupportedKdf => write!(f, "Unsupported kdf"), Error::InvalidCiphertext => write!(f, "Invalid ciphertext"), diff --git a/ethstore/src/json/id.rs b/ethstore/src/json/id.rs index ff282a9f8..664716d95 100644 --- a/ethstore/src/json/id.rs +++ b/ethstore/src/json/id.rs @@ -23,15 +23,15 @@ use super::Error; /// Universaly unique identifier. 
#[derive(Debug, PartialEq)] -pub struct UUID([u8; 16]); +pub struct Uuid([u8; 16]); -impl From<[u8; 16]> for UUID { +impl From<[u8; 16]> for Uuid { fn from(uuid: [u8; 16]) -> Self { - UUID(uuid) + Uuid(uuid) } } -impl<'a> Into for &'a UUID { +impl<'a> Into for &'a Uuid { fn into(self) -> String { let d1 = &self.0[0..4]; let d2 = &self.0[4..6]; @@ -42,44 +42,44 @@ impl<'a> Into for &'a UUID { } } -impl Into for UUID { +impl Into for Uuid { fn into(self) -> String { Into::into(&self) } } -impl Into<[u8; 16]> for UUID { +impl Into<[u8; 16]> for Uuid { fn into(self) -> [u8; 16] { self.0 } } -impl fmt::Display for UUID { +impl fmt::Display for Uuid { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let s: String = (self as &UUID).into(); + let s: String = (self as &Uuid).into(); write!(f, "{}", s) } } fn copy_into(from: &str, into: &mut [u8]) -> Result<(), Error> { - let from = try!(from.from_hex().map_err(|_| Error::InvalidUUID)); + let from = try!(from.from_hex().map_err(|_| Error::InvalidUuid)); if from.len() != into.len() { - return Err(Error::InvalidUUID); + return Err(Error::InvalidUuid); } into.copy_from_slice(&from); Ok(()) } -impl str::FromStr for UUID { +impl str::FromStr for Uuid { type Err = Error; fn from_str(s: &str) -> Result { let parts: Vec<&str> = s.split("-").collect(); if parts.len() != 5 { - return Err(Error::InvalidUUID); + return Err(Error::InvalidUuid); } let mut uuid = [0u8; 16]; @@ -90,17 +90,17 @@ impl str::FromStr for UUID { try!(copy_into(parts[3], &mut uuid[8..10])); try!(copy_into(parts[4], &mut uuid[10..16])); - Ok(UUID(uuid)) + Ok(Uuid(uuid)) } } -impl From<&'static str> for UUID { +impl From<&'static str> for Uuid { fn from(s: &'static str) -> Self { s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) } } -impl Serialize for UUID { +impl Serialize for Uuid { fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { let s: String = self.into(); @@ -108,17 +108,17 @@ impl Serialize for UUID { } } -impl Deserialize for UUID { +impl Deserialize for Uuid { fn deserialize(deserializer: &mut D) -> Result where D: Deserializer { - deserializer.deserialize(UUIDVisitor) + deserializer.deserialize(UuidVisitor) } } -struct UUIDVisitor; +struct UuidVisitor; -impl Visitor for UUIDVisitor { - type Value = UUID; +impl Visitor for UuidVisitor { + type Value = Uuid; fn visit_str(&mut self, value: &str) -> Result where E: SerdeError { value.parse().map_err(SerdeError::custom) @@ -131,18 +131,18 @@ impl Visitor for UUIDVisitor { #[cfg(test)] mod tests { - use super::UUID; + use super::Uuid; #[test] fn uuid_from_str() { - let uuid: UUID = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into(); - assert_eq!(uuid, UUID::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6])); + let uuid: Uuid = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into(); + assert_eq!(uuid, Uuid::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6])); } #[test] fn uuid_from_and_to_str() { let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6"; - let uuid: UUID = from.into(); + let uuid: Uuid = from.into(); let to: String = uuid.into(); assert_eq!(from, &to); } diff --git a/ethstore/src/json/key_file.rs b/ethstore/src/json/key_file.rs index 6e37c7c89..093479b3a 100644 --- a/ethstore/src/json/key_file.rs +++ b/ethstore/src/json/key_file.rs @@ -18,11 +18,11 @@ use std::io::{Read, Write}; use serde::{Deserialize, Deserializer, Error}; use 
serde::de::{Visitor, MapVisitor}; use serde_json; -use super::{UUID, Version, Crypto, H160}; +use super::{Uuid, Version, Crypto, H160}; #[derive(Debug, PartialEq, Serialize)] pub struct KeyFile { - pub id: UUID, + pub id: Uuid, pub version: Version, pub crypto: Crypto, pub address: H160, @@ -31,7 +31,7 @@ pub struct KeyFile { } enum KeyFileField { - ID, + Id, Version, Crypto, Address, @@ -56,7 +56,7 @@ impl Visitor for KeyFileFieldVisitor { where E: Error { match value { - "id" => Ok(KeyFileField::ID), + "id" => Ok(KeyFileField::Id), "version" => Ok(KeyFileField::Version), "crypto" => Ok(KeyFileField::Crypto), "Crypto" => Ok(KeyFileField::Crypto), @@ -94,7 +94,7 @@ impl Visitor for KeyFileVisitor { loop { match try!(visitor.visit_key()) { - Some(KeyFileField::ID) => { id = Some(try!(visitor.visit_value())); } + Some(KeyFileField::Id) => { id = Some(try!(visitor.visit_value())); } Some(KeyFileField::Version) => { version = Some(try!(visitor.visit_value())); } Some(KeyFileField::Crypto) => { crypto = Some(try!(visitor.visit_value())); } Some(KeyFileField::Address) => { address = Some(try!(visitor.visit_value())); } @@ -153,7 +153,7 @@ impl KeyFile { mod tests { use std::str::FromStr; use serde_json; - use json::{KeyFile, UUID, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt}; + use json::{KeyFile, Uuid, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt}; #[test] fn basic_keyfile() { @@ -183,7 +183,7 @@ mod tests { }"#; let expected = KeyFile { - id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), + id: Uuid::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), version: Version::V3, address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(), crypto: Crypto { diff --git a/ethstore/src/json/mod.rs.in b/ethstore/src/json/mod.rs.in index 133d9821e..8ad48b994 100644 --- a/ethstore/src/json/mod.rs.in +++ b/ethstore/src/json/mod.rs.in @@ -14,7 +14,7 @@ pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr}; pub use self::crypto::{Crypto, CipherText}; pub use self::error::Error; pub use self::hash::{H128, H160, H256}; -pub use self::id::UUID; +pub use self::id::Uuid; pub use self::kdf::{Kdf, KdfSer, Prf, Pbkdf2, Scrypt, KdfSerParams}; pub use self::key_file::KeyFile; pub use self::presale::{PresaleWallet, Encseed}; diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 06f38922b..bf0a4ec44 100644 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -16,7 +16,7 @@ use ethkey::{Address, Message, Signature, Secret, Public}; use Error; -use json::UUID; +use json::Uuid; pub trait SecretStore: Send + Sync { fn insert_account(&self, secret: Secret, password: &str) -> Result; @@ -30,7 +30,7 @@ pub trait SecretStore: Send + Sync { fn public(&self, account: &Address, password: &str) -> Result; fn accounts(&self) -> Result, Error>; - fn uuid(&self, account: &Address) -> Result; + fn uuid(&self, account: &Address) -> Result; fn name(&self, account: &Address) -> Result; fn meta(&self, account: &Address) -> Result; diff --git a/js/.babelrc b/js/.babelrc index 8147da435..9f44b6bd2 100644 --- a/js/.babelrc +++ b/js/.babelrc @@ -17,6 +17,15 @@ }, "development": { "plugins": ["react-hot-loader/babel"] + }, + "test": { + "plugins": [ + [ + "babel-plugin-webpack-alias", { + "config": "webpack/test.js" + } + ] + ] } } } diff --git a/js/npm/etherscan/README.md b/js/npm/etherscan/README.md new file mode 100644 index 000000000..8130db3c6 --- /dev/null +++ b/js/npm/etherscan/README.md @@ -0,0 +1,34 @@ +# @parity/etherscan + +A thin, lightweight 
promise wrapper for the api.etherscan.io/apis service, exposing a common endpoint for use in JavaScript applications. + +[https://github.com/ethcore/parity/tree/master/js/src/3rdparty/etherscan](https://github.com/ethcore/parity/tree/master/js/src/3rdparty/etherscan) + +## usage + +installation - + +``` +npm install --save @parity/etherscan +``` + +Usage - + +``` +const etherscan = require('@parity/etherscan'); + +// api calls go here +``` + +## api + +account (exposed on etherscan.account) - + +- `balance(address)` +- `balances(addresses)` (array of addresses) +- `transactions(address, page)` (page offset starts at 0, returns 25) + +stats (exposed on etherscan.stats) - + +- `price()` +- `supply()` diff --git a/js/npm/etherscan/package.json b/js/npm/etherscan/package.json new file mode 100644 index 000000000..0cfdf83e1 --- /dev/null +++ b/js/npm/etherscan/package.json @@ -0,0 +1,33 @@ +{ + "name": "@parity/etherscan", + "description": "The Parity Promise-based library for interfacing with Etherscan over HTTP", + "version": "0.0.0", + "main": "library.js", + "author": "Parity Team ", + "maintainers": [ + "Jaco Greeff" + ], + "contributors": [], + "license": "GPL-3.0", + "repository": { + "type": "git", + "url": "git+https://github.com/ethcore/parity.git" + }, + "keywords": [ + "Ethereum", + "ABI", + "API", + "RPC", + "Parity", + "Promise" + ], + "scripts": { + }, + "devDependencies": { + "chai": "3.5.0", + "mocha": "3.2.0" + }, + "dependencies": { + "node-fetch": "~1.6.3" + } +} diff --git a/js/parity.md b/js/npm/parity/README.md similarity index 92% rename from js/parity.md rename to js/npm/parity/README.md index 3e42f5c8d..30efd3b94 100644 --- a/js/parity.md +++ b/js/npm/parity/README.md @@ -1,7 +1,9 @@ -# parity.js +# @parity/parity.js Parity.js is a thin, fast, Promise-based wrapper around the Ethereum APIs. +[https://github.com/ethcore/parity/tree/master/js/src/api](https://github.com/ethcore/parity/tree/master/js/src/api) + ## installation Install the package with `npm install --save @parity/parity.js` diff --git a/js/parity.package.json b/js/npm/parity/package.json similarity index 72% rename from js/parity.package.json rename to js/npm/parity/package.json index 0974e072f..7b1fcebda 100644 --- a/js/parity.package.json +++ b/js/npm/parity/package.json @@ -1,6 +1,6 @@ { "name": "@parity/parity.js", - "description": "The Parity Promise-base API & ABI library for interfacing with Ethereum over RPC", + "description": "The Parity Promise-based API & ABI library for interfacing with Ethereum over RPC", "version": "0.0.0", "main": "library.js", "author": "Parity Team ", @@ -26,8 +26,8 @@ "devDependencies": { }, "dependencies": { - "bignumber.js": "^2.3.0", - "js-sha3": "^0.5.2", - "node-fetch": "^1.6.3" + "bignumber.js": "~2.3.0", + "js-sha3": "~0.5.2", + "node-fetch": "~1.6.3" } } diff --git a/js/npm/parity/test/smoke.spec.js b/js/npm/parity/test/smoke.spec.js new file mode 100644 index 000000000..9920b10d2 --- /dev/null +++ b/js/npm/parity/test/smoke.spec.js @@ -0,0 +1,26 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +const parity = require('../'); + +describe('load the Parity library', function () { + it('should not throw any error', () => { + expect(parity).to.be.ok; + + expect(parity.Api).to.be.ok; + expect(parity.Abi).to.be.ok; + }); +}); diff --git a/js/npm/shapeshift/README.md b/js/npm/shapeshift/README.md new file mode 100644 index 000000000..0544b6f99 --- /dev/null +++ b/js/npm/shapeshift/README.md @@ -0,0 +1,34 @@ +# @parity/shapeshift + +A thin ES6 promise wrapper around the shapeshift.io APIs as documented at https://shapeshift.io/api + +[https://github.com/ethcore/parity/tree/master/js/src/3rdparty/shapeshift](https://github.com/ethcore/parity/tree/master/js/src/3rdparty/shapeshift) + +## usage + +installation - + +``` +npm install --save @parity/shapeshift +``` + +Usage - + +``` +const APIKEY = 'private affiliate key or undefined'; +const shapeshift = require('@parity/shapeshift')(APIKEY); + +// api calls go here +``` + +## api + +queries - + +- `getCoins()` [https://shapeshift.io/api#api-104](https://shapeshift.io/api#api-104) +- `getMarketInfo(pair)` [https://shapeshift.io/api#api-103](https://shapeshift.io/api#api-103) +- `getStatus(depositAddress)` [https://shapeshift.io/api#api-5](https://shapeshift.io/api#api-5) + +transactions - + +- `shift(toAddress, returnAddress, pair)` [https://shapeshift.io/api#api-7](https://shapeshift.io/api#api-7) diff --git a/js/npm/shapeshift/package.json b/js/npm/shapeshift/package.json new file mode 100644 index 000000000..b0a2c460a --- /dev/null +++ b/js/npm/shapeshift/package.json @@ -0,0 +1,31 @@ +{ + "name": "@parity/shapeshift", + "description": "The Parity Promise-based library for interfacing with ShapeShift over HTTP", + "version": "0.0.0", + "main": "library.js", + "author": "Parity Team ", + "maintainers": [ + "Jaco Greeff" + ], + "contributors": [], + "license": "GPL-3.0", + "repository": { + "type": "git", + "url": "git+https://github.com/ethcore/parity.git" + }, + "keywords": [ + "Ethereum", + "ABI", + "API", + "RPC", + "Parity", + "Promise" + ], + "scripts": { + }, + "devDependencies": { + }, + "dependencies": { + "node-fetch": "~1.6.3" + } +} diff --git a/js/npm/test/mocha.config.js b/js/npm/test/mocha.config.js new file mode 100644 index 000000000..2b871f518 --- /dev/null +++ b/js/npm/test/mocha.config.js @@ -0,0 +1,29 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see .
+ +const chai = require('chai'); +// const chaiAsPromised from 'chai-as-promised'; +// const chaiEnzyme from 'chai-enzyme'; +// const sinonChai from 'sinon-chai'; + +// chai.use(chaiAsPromised); +// chai.use(chaiEnzyme()); +// chai.use(sinonChai); + +// expose expect to global so we won't have to manually import & define it in every test +global.expect = chai.expect; + +module.exports = {}; diff --git a/js/npm/test/mocha.opts b/js/npm/test/mocha.opts new file mode 100644 index 000000000..0ed8269b4 --- /dev/null +++ b/js/npm/test/mocha.opts @@ -0,0 +1 @@ +-r ./test/mocha.config diff --git a/js/package.json b/js/package.json index 404a625b5..a8b156dd2 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "0.2.105", + "version": "0.2.110", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", @@ -40,10 +40,10 @@ "coveralls": "npm run testCoverage && coveralls < coverage/lcov.info", "lint": "eslint --ignore-path .gitignore ./src/", "lint:cached": "eslint --cache --ignore-path .gitignore ./src/", - "test": "mocha 'src/**/*.spec.js'", - "test:coverage": "istanbul cover _mocha -- 'src/**/*.spec.js'", - "test:e2e": "mocha 'src/**/*.e2e.js'", - "test:npm": "(cd .npmjs && npm i) && node test/npmLibrary && (rm -rf .npmjs/node_modules)", + "test": "NODE_ENV=test mocha 'src/**/*.spec.js'", + "test:coverage": "NODE_ENV=test istanbul cover _mocha -- 'src/**/*.spec.js'", + "test:e2e": "NODE_ENV=test mocha 'src/**/*.e2e.js'", + "test:npm": "(cd .npmjs && npm i) && node test/npmParity && (rm -rf .npmjs/node_modules)", "prepush": "npm run lint:cached" }, "devDependencies": { @@ -57,6 +57,7 @@ "babel-plugin-transform-object-rest-spread": "6.20.2", "babel-plugin-transform-react-remove-prop-types": "0.2.11", "babel-plugin-transform-runtime": "6.15.0", + "babel-plugin-webpack-alias": "2.1.2", "babel-polyfill": "6.20.0", "babel-preset-es2015": "6.18.0", "babel-preset-es2016": "6.16.0", @@ -66,6 +67,7 @@ "babel-register": "6.18.0", "babel-runtime": "6.20.0", "chai": "3.5.0", + "chai-as-promised": "6.0.0", "chai-enzyme": "0.6.1", "circular-dependency-plugin": "2.0.0", "copy-webpack-plugin": "4.0.1", @@ -99,8 +101,8 @@ "mock-local-storage": "1.0.2", "mock-socket": "6.0.3", "nock": "9.0.2", - "postcss-import": "8.1.0", - "postcss-loader": "1.1.1", + "postcss-import": "9.0.0", + "postcss-loader": "1.2.0", "postcss-nested": "1.0.0", "postcss-simple-vars": "3.0.0", "progress": "1.1.8", @@ -137,13 +139,14 @@ "js-sha3": "0.5.5", "lodash": "4.17.2", "marked": "0.3.6", - "material-ui": "0.16.4", + "material-ui": "0.16.5", "material-ui-chip-input": "0.11.1", "mobx": "2.6.4", "mobx-react": "4.0.3", "mobx-react-devtools": "4.2.10", "moment": "2.17.0", "phoneformat.js": "1.0.3", + "push.js": "0.0.11", "qs": "6.3.0", "react": "15.4.1", "react-ace": "4.1.0", diff --git a/js/scripts/dryrun-npm.sh b/js/scripts/dryrun-npm.sh new file mode 100755 index 000000000..6d9412f62 --- /dev/null +++ b/js/scripts/dryrun-npm.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +# variables +PACKAGES=( "parity" "etherscan" "shapeshift" ) + +# change into the build directory +BASEDIR=`dirname $0` +cd $BASEDIR/.. 
+ +# build all packages +echo "*** Building packages for npmjs" +echo "$NPM_TOKEN" >> ~/.npmrc + +for PACKAGE in ${PACKAGES[@]} +do + echo "*** Building $PACKAGE" + LIBRARY=$PACKAGE npm run ci:build:npm + DIRECTORY=.npmjs/$PACKAGE + + cd $DIRECTORY + echo "*** Executing $PACKAGE tests from $DIRECTORY" + npm test + + echo "*** Publishing $PACKAGE from $DIRECTORY" + echo "npm publish --access public || true" + cd ../.. + +done +cd .. + +# exit with exit code +exit 0 diff --git a/js/scripts/release.sh b/js/scripts/release.sh index 1cf3095ef..88422df0a 100755 --- a/js/scripts/release.sh +++ b/js/scripts/release.sh @@ -3,7 +3,7 @@ set -e # variables UTCDATE=`date -u "+%Y%m%d-%H%M%S"` -PACKAGES=( "parity.js" ) +PACKAGES=( "parity" "etherscan" "shapeshift" ) BRANCH=$CI_BUILD_REF_NAME GIT_JS_PRECOMPILED="https://${GITHUB_JS_PRECOMPILED}:@github.com/ethcore/js-precompiled.git" GIT_PARITY="https://${GITHUB_JS_PRECOMPILED}:@github.com/ethcore/parity.git" @@ -59,19 +59,27 @@ git reset --hard origin/$BRANCH 2>$GITLOG if [ "$BRANCH" == "master" ]; then cd js + echo "*** Bumping package.json patch version" npm --no-git-tag-version version npm version patch echo "*** Building packages for npmjs" - # echo -e "$NPM_USERNAME\n$NPM_PASSWORD\n$NPM_EMAIL" | npm login echo "$NPM_TOKEN" >> ~/.npmrc - npm run ci:build:npm - echo "*** Publishing $PACKAGE to npmjs" - cd .npmjs - npm publish --access public || true - cd ../.. + for PACKAGE in ${PACKAGES[@]} + do + echo "*** Building $PACKAGE" + LIBRARY=$PACKAGE npm run ci:build:npm + DIRECTORY=.npmjs/$PACKAGE + + echo "*** Publishing $PACKAGE from $DIRECTORY" + cd $DIRECTORY + npm publish --access public || true + cd ../.. + done + + cd .. fi echo "*** Updating cargo parity-ui-precompiled#$PRECOMPILED_HASH" diff --git a/js/src/3rdparty/etherscan/account.spec.js b/js/src/3rdparty/etherscan/account.spec.js index 283fab3d2..9dc36c254 100644 --- a/js/src/3rdparty/etherscan/account.spec.js +++ b/js/src/3rdparty/etherscan/account.spec.js @@ -14,11 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import etherscan from './'; +const etherscan = require('./'); const TESTADDR = '0xbf885e2b55c6bcc84556a3c5f07d3040833c8d00'; -describe.skip('etherscan/account', () => { +describe.skip('etherscan/account', function () { + this.timeout(60 * 1000); + const checkBalance = function (balance, addr) { expect(balance).to.be.ok; expect(balance.account).to.equal(addr); diff --git a/js/src/3rdparty/etherscan/stats.spec.js b/js/src/3rdparty/etherscan/stats.spec.js index fde2b035c..62152b6be 100644 --- a/js/src/3rdparty/etherscan/stats.spec.js +++ b/js/src/3rdparty/etherscan/stats.spec.js @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import etherscan from './'; +const etherscan = require('./'); + +describe.skip('etherscan/stats', function () { + this.timeout(60 * 1000); -describe.skip('etherscan/stats', () => { it('retrieves the latest price', () => { return etherscan.stats .price() diff --git a/js/src/3rdparty/shapeshift/helpers.spec.js b/js/src/3rdparty/shapeshift/helpers.spec.js index a82b2f6c3..f2ea0f3f9 100644 --- a/js/src/3rdparty/shapeshift/helpers.spec.js +++ b/js/src/3rdparty/shapeshift/helpers.spec.js @@ -14,22 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-import chai from 'chai'; -import nock from 'nock'; +const nock = require('nock'); -global.expect = chai.expect; // eslint-disable-line no-undef - -import 'isomorphic-fetch'; -import es6Promise from 'es6-promise'; -es6Promise.polyfill(); - -import initShapeshift from './'; -import initRpc from './rpc'; +const ShapeShift = require('./'); +const initShapeshift = (ShapeShift.default || ShapeShift); const APIKEY = '0x123454321'; const shapeshift = initShapeshift(APIKEY); -const rpc = initRpc(APIKEY); +const rpc = shapeshift.getRpc(); function mockget (requests) { let scope = nock(rpc.ENDPOINT); @@ -62,7 +55,7 @@ function mockpost (requests) { return scope; } -export { +module.exports = { APIKEY, mockget, mockpost, diff --git a/js/src/3rdparty/shapeshift/rpc.spec.js b/js/src/3rdparty/shapeshift/rpc.spec.js index 8de9e8641..47d4e0052 100644 --- a/js/src/3rdparty/shapeshift/rpc.spec.js +++ b/js/src/3rdparty/shapeshift/rpc.spec.js @@ -14,7 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { APIKEY, mockget, mockpost, rpc } from './helpers.spec.js'; +const helpers = require('./helpers.spec.js'); + +const APIKEY = helpers.APIKEY; +const mockget = helpers.mockget; +const mockpost = helpers.mockpost; +const rpc = helpers.rpc; describe('shapeshift/rpc', () => { describe('GET', () => { diff --git a/js/src/3rdparty/shapeshift/shapeshift.js b/js/src/3rdparty/shapeshift/shapeshift.js index 8f388d0a7..5743fcac1 100644 --- a/js/src/3rdparty/shapeshift/shapeshift.js +++ b/js/src/3rdparty/shapeshift/shapeshift.js @@ -26,6 +26,10 @@ export default function (rpc) { return rpc.get(`marketinfo/${pair}`); } + function getRpc () { + return rpc; + } + function getStatus (depositAddress) { return rpc.get(`txStat/${depositAddress}`); } @@ -103,6 +107,7 @@ export default function (rpc) { return { getCoins, getMarketInfo, + getRpc, getStatus, shift, subscribe, diff --git a/js/src/3rdparty/shapeshift/shapeshift.spec.js b/js/src/3rdparty/shapeshift/shapeshift.spec.js index 36b1506a2..01180e130 100644 --- a/js/src/3rdparty/shapeshift/shapeshift.spec.js +++ b/js/src/3rdparty/shapeshift/shapeshift.spec.js @@ -14,7 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-import { mockget, mockpost, shapeshift } from './helpers.spec.js'; +const helpers = require('./helpers.spec.js'); + +const mockget = helpers.mockget; +const mockpost = helpers.mockpost; +const shapeshift = helpers.shapeshift; describe('shapeshift/calls', () => { describe('getCoins', () => { diff --git a/js/src/contracts/contracts.js b/js/src/contracts/contracts.js index f61a63690..a8020b825 100644 --- a/js/src/contracts/contracts.js +++ b/js/src/contracts/contracts.js @@ -62,6 +62,10 @@ export default class Contracts { } static create (api) { + if (instance) { + return instance; + } + return new Contracts(api); } diff --git a/js/src/contracts/registry.js b/js/src/contracts/registry.js index 2f61f7f4a..9354a59e5 100644 --- a/js/src/contracts/registry.js +++ b/js/src/contracts/registry.js @@ -19,7 +19,10 @@ import * as abis from './abi'; export default class Registry { constructor (api) { this._api = api; - this._contracts = []; + + this._contracts = {}; + this._pendingContracts = {}; + this._instance = null; this._fetching = false; this._queue = []; @@ -59,20 +62,25 @@ export default class Registry { getContract (_name) { const name = _name.toLowerCase(); - return new Promise((resolve, reject) => { - if (this._contracts[name]) { - resolve(this._contracts[name]); - return; - } + if (this._contracts[name]) { + return Promise.resolve(this._contracts[name]); + } - this - .lookupAddress(name) - .then((address) => { - this._contracts[name] = this._api.newContract(abis[name], address); - resolve(this._contracts[name]); - }) - .catch(reject); - }); + if (this._pendingContracts[name]) { + return this._pendingContracts[name]; + } + + const promise = this + .lookupAddress(name) + .then((address) => { + this._contracts[name] = this._api.newContract(abis[name], address); + delete this._pendingContracts[name]; + return this._contracts[name]; + }); + + this._pendingContracts[name] = promise; + + return promise; } getContractInstance (_name) { @@ -89,7 +97,7 @@ export default class Registry { return instance.getAddress.call({}, [sha3, 'A']); }) .then((address) => { - console.log('lookupAddress', name, sha3, address); + console.log('[lookupAddress]', `(${sha3}) ${name}: ${address}`); return address; }); } diff --git a/js/src/dapps/localtx/Application/application.spec.js b/js/src/dapps/localtx/Application/application.spec.js index 2044b4e14..3ffee343d 100644 --- a/js/src/dapps/localtx/Application/application.spec.js +++ b/js/src/dapps/localtx/Application/application.spec.js @@ -21,7 +21,7 @@ import '../../../environment/tests'; import Application from './application'; -describe('localtx/Application', () => { +describe('dapps/localtx/Application', () => { describe('rendering', () => { it('renders without crashing', () => { const rendered = shallow(); diff --git a/js/src/dapps/localtx/Transaction/transaction.spec.js b/js/src/dapps/localtx/Transaction/transaction.spec.js index 5e9c39147..04f2f8de8 100644 --- a/js/src/dapps/localtx/Transaction/transaction.spec.js +++ b/js/src/dapps/localtx/Transaction/transaction.spec.js @@ -29,7 +29,7 @@ Api.api = { import BigNumber from 'bignumber.js'; import { Transaction, LocalTransaction } from './transaction'; -describe('localtx/Transaction', () => { +describe('dapps/localtx/Transaction', () => { describe('rendering', () => { it('renders without crashing', () => { const transaction = { @@ -51,7 +51,7 @@ describe('localtx/Transaction', () => { }); }); -describe('localtx/LocalTransaction', () => { +describe('dapps/localtx/LocalTransaction', () => { describe('rendering', () 
=> { it('renders without crashing', () => { const rendered = shallow( diff --git a/js/src/index.js b/js/src/index.js index 6938a46f8..46d6c9c74 100644 --- a/js/src/index.js +++ b/js/src/index.js @@ -67,7 +67,7 @@ if (window.location.hash && window.location.hash.indexOf(AUTH_HASH) === 0) { const api = new SecureApi(`ws://${parityUrl}`, token); ContractInstances.create(api); -const store = initStore(api); +const store = initStore(api, hashHistory); store.dispatch({ type: 'initAll', api }); store.dispatch(setApi(api)); diff --git a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index 067ced1fc..76886af35 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -43,7 +43,7 @@ export default { }, uuid: { type: String, - desc: 'The account UUID, or null if not available/unknown/not applicable.' + desc: 'The account Uuid, or null if not available/unknown/not applicable.' } } } @@ -66,7 +66,7 @@ export default { }, uuid: { type: String, - desc: 'The account UUID, or null if not available/unknown/not applicable.' + desc: 'The account Uuid, or null if not available/unknown/not applicable.' } } } diff --git a/js/src/library.etherscan.js b/js/src/library.etherscan.js new file mode 100644 index 000000000..59cb861d3 --- /dev/null +++ b/js/src/library.etherscan.js @@ -0,0 +1,34 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import 'babel-polyfill/dist/polyfill.js'; +import es6Promise from 'es6-promise'; +es6Promise.polyfill(); + +const isNode = typeof global !== 'undefined' && typeof global !== 'undefined'; +const isBrowser = typeof self !== 'undefined' && typeof self.window !== 'undefined'; + +if (isBrowser) { + require('whatwg-fetch'); +} + +if (isNode) { + global.fetch = require('node-fetch'); +} + +import Etherscan from './3rdparty/etherscan'; + +module.exports = Etherscan; diff --git a/js/src/library.js b/js/src/library.parity.js similarity index 100% rename from js/src/library.js rename to js/src/library.parity.js diff --git a/js/src/library.shapeshift.js b/js/src/library.shapeshift.js new file mode 100644 index 000000000..3f24f216b --- /dev/null +++ b/js/src/library.shapeshift.js @@ -0,0 +1,34 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +import 'babel-polyfill/dist/polyfill.js'; +import es6Promise from 'es6-promise'; +es6Promise.polyfill(); + +const isNode = typeof global !== 'undefined' && typeof global !== 'undefined'; +const isBrowser = typeof self !== 'undefined' && typeof self.window !== 'undefined'; + +if (isBrowser) { + require('whatwg-fetch'); +} + +if (isNode) { + global.fetch = require('node-fetch'); +} + +import ShapeShift from './3rdparty/shapeshift'; + +module.exports = ShapeShift; diff --git a/js/src/main.js b/js/src/main.js index d508c50fc..f61e3d563 100644 --- a/js/src/main.js +++ b/js/src/main.js @@ -15,7 +15,7 @@ // along with Parity. If not, see . import React, { Component, PropTypes } from 'react'; -import { Redirect, Router, Route } from 'react-router'; +import { Redirect, Router, Route, IndexRoute } from 'react-router'; import { Accounts, Account, Addresses, Address, Application, Contract, Contracts, WriteContract, Wallet, Dapp, Dapps, Settings, SettingsBackground, SettingsParity, SettingsProxy, SettingsViews, Signer, Status } from '~/views'; @@ -26,6 +26,23 @@ export default class MainApplication extends Component { routerHistory: PropTypes.any.isRequired }; + handleDeprecatedRoute = (nextState, replace) => { + const { address } = nextState.params; + const redirectMap = { + account: 'accounts', + address: 'addresses', + contract: 'contracts' + }; + + const oldRoute = nextState.routes[0].path; + const newRoute = Object.keys(redirectMap).reduce((newRoute, key) => { + return newRoute.replace(new RegExp(`^/${key}`), '/' + redirectMap[key]); + }, oldRoute); + + console.warn(`Route "${oldRoute}" is deprecated. Please use "${newRoute}"`); + replace(newRoute.replace(':address', address)); + } + render () { const { routerHistory } = this.props; @@ -34,26 +51,46 @@ export default class MainApplication extends Component { + + { /** Backward Compatible links */ } + + + + - - - - - + + + + + + + + + + + - - - + + + + + + + + - - + + + + + ); diff --git a/js/src/modals/AddAddress/addAddress.js b/js/src/modals/AddAddress/addAddress.js index e44cb0b3c..a72158cc7 100644 --- a/js/src/modals/AddAddress/addAddress.js +++ b/js/src/modals/AddAddress/addAddress.js @@ -28,6 +28,7 @@ export default class AddAddress extends Component { static propTypes = { contacts: PropTypes.object.isRequired, + address: PropTypes.string, onClose: PropTypes.func }; @@ -39,6 +40,12 @@ export default class AddAddress extends Component { description: '' }; + componentWillMount () { + if (this.props.address) { + this.onEditAddress(null, this.props.address); + } + } + render () { return ( + balances={ balances } + error={ fromAddressError } + onChange={ this.onFromAddressChange } + value={ fromAddress } + /> + /> + /> { this.renderContractSelect() } @@ -119,17 +123,19 @@ export default class DetailsStep extends Component { label='abi / solc combined-output' hint='the abi of the contract to deploy or solc combined-output' error={ abiError } - value={ solcOutput } onChange={ this.onSolcChange } onSubmit={ this.onSolcSubmit } - readOnly={ readOnly } /> + readOnly={ readOnly } + value={ solcOutput } + /> + readOnly={ readOnly || solc } + value={ code } + /> ); diff --git a/js/src/modals/DeployContract/deployContract.js b/js/src/modals/DeployContract/deployContract.js index 5bf4fc389..21325f786 100644 --- a/js/src/modals/DeployContract/deployContract.js +++ b/js/src/modals/DeployContract/deployContract.js @@ -15,8 +15,10 @@ // along with Parity. If not, see . 
import React, { Component, PropTypes } from 'react'; +import { connect } from 'react-redux'; import ActionDoneAll from 'material-ui/svg-icons/action/done-all'; import ContentClear from 'material-ui/svg-icons/content/clear'; +import { pick } from 'lodash'; import { BusyStep, CompletedStep, CopyToClipboard, Button, IdentityIcon, Modal, TxHash } from '~/ui'; import { ERRORS, validateAbi, validateCode, validateName } from '~/util/validation'; @@ -36,7 +38,7 @@ const STEPS = { COMPLETED: { title: 'completed' } }; -export default class DeployContract extends Component { +class DeployContract extends Component { static contextTypes = { api: PropTypes.object.isRequired, store: PropTypes.object.isRequired @@ -45,6 +47,7 @@ export default class DeployContract extends Component { static propTypes = { accounts: PropTypes.object.isRequired, onClose: PropTypes.func.isRequired, + balances: PropTypes.object, abi: PropTypes.string, code: PropTypes.string, readOnly: PropTypes.bool, @@ -192,7 +195,7 @@ export default class DeployContract extends Component { } renderStep () { - const { accounts, readOnly } = this.props; + const { accounts, readOnly, balances } = this.props; const { address, deployError, step, deployState, txhash, rejected } = this.state; if (deployError) { @@ -216,6 +219,7 @@ export default class DeployContract extends Component { { + const balances = pick(state.balances.balances, fromAddresses); + return { balances }; + }; +} + +export default connect( + mapStateToProps +)(DeployContract); + diff --git a/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js b/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js index 3ffb929a9..fde7fa1b2 100644 --- a/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js +++ b/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js @@ -32,25 +32,27 @@ export default class DetailsStep extends Component { static propTypes = { accounts: PropTypes.object.isRequired, contract: PropTypes.object.isRequired, - amount: PropTypes.string, - amountError: PropTypes.string, onAmountChange: PropTypes.func.isRequired, - fromAddress: PropTypes.string, - fromAddressError: PropTypes.string, - gasEdit: PropTypes.bool, onFromAddressChange: PropTypes.func.isRequired, - func: PropTypes.object, - funcError: PropTypes.string, - onFuncChange: PropTypes.func, - onGasEditClick: PropTypes.func, + onValueChange: PropTypes.func.isRequired, values: PropTypes.array.isRequired, valuesError: PropTypes.array.isRequired, - warning: PropTypes.string, - onValueChange: PropTypes.func.isRequired + + amount: PropTypes.string, + amountError: PropTypes.string, + balances: PropTypes.object, + fromAddress: PropTypes.string, + fromAddressError: PropTypes.string, + func: PropTypes.object, + funcError: PropTypes.string, + gasEdit: PropTypes.bool, + onFuncChange: PropTypes.func, + onGasEditClick: PropTypes.func, + warning: PropTypes.string } render () { - const { accounts, amount, amountError, fromAddress, fromAddressError, gasEdit, onGasEditClick, onFromAddressChange, onAmountChange } = this.props; + const { accounts, amount, amountError, balances, fromAddress, fromAddressError, gasEdit, onGasEditClick, onFromAddressChange, onAmountChange } = this.props; return (
@@ -61,6 +63,7 @@ export default class DetailsStep extends Component { value={ fromAddress } error={ fromAddressError } accounts={ accounts } + balances={ balances } onChange={ onFromAddressChange } /> { this.renderFunctionSelect() } { this.renderParameters() } diff --git a/js/src/modals/ExecuteContract/executeContract.js b/js/src/modals/ExecuteContract/executeContract.js index 7b4e8ccd2..c3ac96490 100644 --- a/js/src/modals/ExecuteContract/executeContract.js +++ b/js/src/modals/ExecuteContract/executeContract.js @@ -18,6 +18,8 @@ import React, { Component, PropTypes } from 'react'; import { connect } from 'react-redux'; import { bindActionCreators } from 'redux'; import { observer } from 'mobx-react'; +import { pick } from 'lodash'; + import ActionDoneAll from 'material-ui/svg-icons/action/done-all'; import ContentClear from 'material-ui/svg-icons/content/clear'; import NavigationArrowBack from 'material-ui/svg-icons/navigation/arrow-back'; @@ -57,6 +59,7 @@ class ExecuteContract extends Component { isTest: PropTypes.bool, fromAddress: PropTypes.string, accounts: PropTypes.object, + balances: PropTypes.object, contract: PropTypes.object, gasLimit: PropTypes.object.isRequired, onClose: PropTypes.func.isRequired, @@ -362,10 +365,15 @@ class ExecuteContract extends Component { } } -function mapStateToProps (state) { - const { gasLimit } = state.nodeStatus; +function mapStateToProps (initState, initProps) { + const fromAddresses = Object.keys(initProps.accounts); - return { gasLimit }; + return (state) => { + const balances = pick(state.balances.balances, fromAddresses); + const { gasLimit } = state.nodeStatus; + + return { gasLimit, balances }; + }; } function mapDispatchToProps (dispatch) { diff --git a/js/src/modals/Transfer/Details/details.js b/js/src/modals/Transfer/Details/details.js index 53f04c489..20ac06f85 100644 --- a/js/src/modals/Transfer/Details/details.js +++ b/js/src/modals/Transfer/Details/details.js @@ -134,6 +134,7 @@ export default class Details extends Component { images: PropTypes.object.isRequired, sender: PropTypes.string, senderError: PropTypes.string, + sendersBalances: PropTypes.object, recipient: PropTypes.string, recipientError: PropTypes.string, tag: PropTypes.string, @@ -203,7 +204,7 @@ export default class Details extends Component { } renderFromAddress () { - const { sender, senderError, senders } = this.props; + const { sender, senderError, senders, sendersBalances } = this.props; if (!senders) { return null; @@ -218,6 +219,7 @@ export default class Details extends Component { hint='the sender address' value={ sender } onChange={ this.onEditSender } + balances={ sendersBalances } /> ); diff --git a/js/src/modals/Transfer/store.js b/js/src/modals/Transfer/store.js index cbb10f17f..e08d7203d 100644 --- a/js/src/modals/Transfer/store.js +++ b/js/src/modals/Transfer/store.js @@ -54,6 +54,7 @@ export default class TransferStore { @observable sender = ''; @observable senderError = null; + @observable sendersBalances = {}; @observable total = '0.0'; @observable totalError = null; @@ -66,8 +67,6 @@ export default class TransferStore { onClose = null; senders = null; - sendersBalances = null; - isWallet = false; wallet = null; @@ -107,10 +106,9 @@ export default class TransferStore { constructor (api, props) { this.api = api; - const { account, balance, gasLimit, senders, onClose, newError, sendersBalances } = props; + const { account, balance, gasLimit, senders, newError, sendersBalances } = props; this.account = account; this.balance = balance; - this.onClose = 
onClose;
     this.isWallet = account && account.wallet;
     this.newError = newError;
@@ -136,8 +134,7 @@
     this.stage -= 1;
   }
-  @action onClose = () => {
-    this.onClose && this.onClose();
+  @action handleClose = () => {
     this.stage = 0;
   }
diff --git a/js/src/modals/Transfer/transfer.js b/js/src/modals/Transfer/transfer.js
index 00e84adaf..57dc569f2 100644
--- a/js/src/modals/Transfer/transfer.js
+++ b/js/src/modals/Transfer/transfer.js
@@ -155,8 +155,8 @@ class Transfer extends Component {
   renderDetailsPage () {
     const { account, balance, images, senders } = this.props;
-    const { valueAll, extras, recipient, recipientError, sender, senderError } = this.store;
-    const { tag, total, totalError, value, valueError } = this.store;
+    const { recipient, recipientError, sender, senderError, sendersBalances } = this.store;
+    const { valueAll, extras, tag, total, totalError, value, valueError } = this.store;
     return (
 } label='Cancel'
-  onClick={ this.store.onClose } />
+  onClick={ this.handleClose } />
 );
 const nextBtn = (
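// A minimal sketch (not part of the patch) of the react-redux pattern that the
// DeployContract and ExecuteContract hunks above rely on: when mapStateToProps
// returns a function, react-redux calls the outer function once per component
// instance and uses the returned function as that instance's selector, so own
// props (here, the `accounts` object) can be captured up front. lodash `pick`
// then narrows the balances map to those accounts. The state shape
// (`state.balances.balances`) and the `accounts` own prop mirror the patch;
// `withAccountBalances` is a hypothetical helper name used only here.

import { connect } from 'react-redux';
import { pick } from 'lodash';

function mapStateToProps (initState, initProps) {
  // Resolved once per mounted component from its own props.
  const fromAddresses = Object.keys(initProps.accounts);

  // The returned selector is what react-redux invokes on store updates.
  return (state) => {
    const balances = pick(state.balances.balances, fromAddresses);

    return { balances };
  };
}

// Hypothetical usage: wrap any component that needs per-sender balances.
const withAccountBalances = (WrappedComponent) =>
  connect(mapStateToProps)(WrappedComponent);

export default withAccountBalances;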