From a56c5e6ba8d8a3cc3005275a5d6d69f9c4f4ed28 Mon Sep 17 00:00:00 2001 From: Antonios Hadjigeorgalis Date: Tue, 26 Sep 2017 17:53:05 -0400 Subject: [PATCH 01/22] Updated systemd files for linux (#6592) Previous version put $BASE directory in root directory. This version clearly explains how to run as root or as specific user. Additional configuration: * send SIGHUP for clean exit, * restart on fail. Tested on Ubuntu 16.04.3 LTS with 4.10.0-33-generic x86_64 kernel --- scripts/parity.service | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/parity.service b/scripts/parity.service index 31d9011ea..1af907234 100644 --- a/scripts/parity.service +++ b/scripts/parity.service @@ -3,8 +3,18 @@ Description=Parity Daemon After=network.target [Service] -EnvironmentFile=-%h/.parity/parity.conf -ExecStart=/usr/bin/parity $ARGS +# run as root, set base_path in config.toml +ExecStart=/usr/bin/parity --config /etc/parity/config.toml +# To run as user, comment out above and uncomment below, fill in user and group +# picks up user's default config.toml in $HOME/.local/share/io.parity.ethereum/ +# User=username +# Group=groupname +# ExecStart=/usr/bin/parity +Restart=on-failure + +# Specifies which signal to use when killing a service. Defaults to SIGTERM. 
+# SIGHUP gives parity time to exit cleanly before SIGKILL (default 90s) +KillSignal=SIGHUP [Install] WantedBy=default.target From fb38c20c167537a9a39061d3b1134f0c5d8e5008 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 3 Oct 2017 11:35:31 +0300 Subject: [PATCH 02/22] SecretStore: exclusive sessions --- .../client_sessions/signing_session.rs | 8 + .../src/key_server_cluster/cluster.rs | 33 ++-- .../key_server_cluster/cluster_sessions.rs | 169 +++++++++++++++--- secret_store/src/key_server_cluster/mod.rs | 7 +- 4 files changed, 177 insertions(+), 40 deletions(-) diff --git a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs index d094e6516..a111d4472 100644 --- a/secret_store/src/key_server_cluster/client_sessions/signing_session.rs +++ b/secret_store/src/key_server_cluster/client_sessions/signing_session.rs @@ -532,6 +532,14 @@ impl Cluster for SessionKeyGenerationTransport { debug_assert!(self.other_nodes_ids.contains(to)); self.cluster.send(to, self.map_message(message)?) } + + fn is_connected(&self, node: &NodeId) -> bool { + self.cluster.is_connected(node) + } + + fn nodes(&self) -> BTreeSet { + self.cluster.nodes() + } } impl SessionCore { diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index b6107bd8d..c9cf30cdb 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -97,6 +97,10 @@ pub trait Cluster: Send + Sync { fn broadcast(&self, message: Message) -> Result<(), Error>; /// Send message to given node. fn send(&self, to: &NodeId, message: Message) -> Result<(), Error>; + /// Is connected to given node? + fn is_connected(&self, node: &NodeId) -> bool; + /// Get a set of connected nodes. + fn nodes(&self) -> BTreeSet; } /// Cluster initialization parameters. 
@@ -652,7 +656,7 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: decryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); - let error_message = message::DecryptionSessionError { +let error_message = message::DecryptionSessionError { session: session_id.clone().into(), sub_session: sub_session_id.clone().into(), session_nonce: session_nonce, @@ -1287,14 +1291,6 @@ impl ClusterView { })), } } - - pub fn is_connected(&self, node: &NodeId) -> bool { - self.core.lock().nodes.contains(node) - } - - pub fn nodes(&self) -> BTreeSet { - self.core.lock().nodes.clone() - } } impl Cluster for ClusterView { @@ -1315,6 +1311,14 @@ impl Cluster for ClusterView { core.cluster.spawn(connection.send_message(message)); Ok(()) } + + fn is_connected(&self, node: &NodeId) -> bool { + self.core.lock().nodes.contains(node) + } + + fn nodes(&self) -> BTreeSet { + self.core.lock().nodes.clone() + } } impl ClusterClientImpl { @@ -1460,7 +1464,7 @@ fn make_socket_address(address: &str, port: u16) -> Result { pub mod tests { use std::sync::Arc; use std::time; - use std::collections::VecDeque; + use std::collections::{BTreeSet, VecDeque}; use parking_lot::Mutex; use tokio_core::reactor::Core; use ethkey::{Random, Generator, Public}; @@ -1517,6 +1521,15 @@ pub mod tests { self.data.lock().messages.push_back((to.clone(), message)); Ok(()) } + + fn is_connected(&self, node: &NodeId) -> bool { + let data = self.data.lock(); + &self.id == node || data.nodes.contains(node) + } + + fn nodes(&self) -> BTreeSet { + self.data.lock().nodes.iter().cloned().collect() + } } pub fn loop_until(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool { diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index cfc00241d..15c3e0bcb 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ 
b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -18,10 +18,10 @@ use std::time; use std::sync::{Arc, Weak}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::collections::{VecDeque, BTreeSet, BTreeMap}; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use ethkey::{Public, Secret, Signature}; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta}; -use key_server_cluster::cluster::{Cluster, ClusterData, ClusterView, ClusterConfiguration}; +use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration}; use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage, ShareAddMessage, ShareMoveMessage, ShareRemoveMessage, ServersSetChangeMessage}; use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl, @@ -50,7 +50,7 @@ const SESSION_TIMEOUT_INTERVAL: u64 = 60; lazy_static! { /// Servers set change session id (there could be at most 1 session => hardcoded id). - static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c206f4b71d62491dfb9f7dbeccc42a6c112c8bb507de7b4fcad8d646272b2c363" + static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c" .parse() .expect("hardcoded id should parse without errors; qed"); } @@ -119,6 +119,8 @@ pub struct ClusterSessions { pub struct ClusterSessionsContainer { /// Active sessions. pub sessions: RwLock>>, + /// Sessions container state. + container_state: Arc> } /// Session and its message queue. @@ -126,7 +128,7 @@ pub struct QueuedSession { /// Session master. pub master: NodeId, /// Cluster view. - pub cluster_view: Arc, + pub cluster_view: Arc, /// Last received message time. pub last_message_time: time::Instant, /// Generation session. 
@@ -135,6 +137,17 @@ pub struct QueuedSession { pub queue: VecDeque<(NodeId, M)>, } +/// Cluster sessions container state. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ClusterSessionsContainerState { + /// There's no active sessions => any session can be started. + Idle, + /// There are active sessions => exclusive session can't be started right now. + Active(usize), + /// Exclusive session is active => can't start any other sessions. + Exclusive, +} + /// Generation session implementation, which removes session from cluster on drop. pub struct GenerationSessionWrapper { /// Wrapped session. @@ -188,17 +201,18 @@ pub struct AdminSessionWrapper { impl ClusterSessions { /// Create new cluster sessions container. pub fn new(config: &ClusterConfiguration) -> Self { + let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle)); ClusterSessions { self_node_id: config.self_key_pair.public().clone(), nodes: config.key_server_set.get().keys().cloned().collect(), acl_storage: config.acl_storage.clone(), key_storage: config.key_storage.clone(), admin_public: config.admin_public.clone(), - generation_sessions: ClusterSessionsContainer::new(), - encryption_sessions: ClusterSessionsContainer::new(), - decryption_sessions: ClusterSessionsContainer::new(), - signing_sessions: ClusterSessionsContainer::new(), - admin_sessions: ClusterSessionsContainer::new(), + generation_sessions: ClusterSessionsContainer::new(container_state.clone()), + encryption_sessions: ClusterSessionsContainer::new(container_state.clone()), + decryption_sessions: ClusterSessionsContainer::new(container_state.clone()), + signing_sessions: ClusterSessionsContainer::new(container_state.clone()), + admin_sessions: ClusterSessionsContainer::new(container_state), make_faulty_generation_sessions: AtomicBool::new(false), session_counter: AtomicUsize::new(0), max_nonce: RwLock::new(BTreeMap::new()), @@ -211,7 +225,7 @@ impl ClusterSessions { } /// Create new generation session. 
- pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { // check that there's no finished encryption session with the same id if self.key_storage.contains(&session_id) { return Err(Error::DuplicateSessionId); @@ -225,7 +239,7 @@ impl ClusterSessions { // check that there's no active encryption session with the same id let nonce = self.check_session_nonce(&master, nonce)?; - self.generation_sessions.insert(master, session_id, cluster.clone(), move || + self.generation_sessions.insert(master, session_id, cluster.clone(), false, move || Ok(GenerationSessionImpl::new(GenerationSessionParams { id: session_id.clone(), self_node_id: self.self_node_id.clone(), @@ -254,11 +268,11 @@ impl ClusterSessions { } /// Create new encryption session. - pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { let encrypted_data = self.read_key_share(&session_id, &cluster)?; let nonce = self.check_session_nonce(&master, nonce)?; - self.encryption_sessions.insert(master, session_id, cluster.clone(), move || EncryptionSessionImpl::new(EncryptionSessionParams { + self.encryption_sessions.insert(master, session_id, cluster.clone(), false, move || EncryptionSessionImpl::new(EncryptionSessionParams { id: session_id.clone(), self_node_id: self.self_node_id.clone(), encrypted_data: encrypted_data, @@ -281,12 +295,12 @@ impl ClusterSessions { } /// Create new decryption session. 
- pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { + pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { let session_id = DecryptionSessionId::new(session_id, sub_session_id); let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; let nonce = self.check_session_nonce(&master, nonce)?; - self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), move || DecryptionSessionImpl::new(DecryptionSessionParams { + self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || DecryptionSessionImpl::new(DecryptionSessionParams { meta: SessionMeta { id: session_id.id, self_node_id: self.self_node_id.clone(), @@ -320,12 +334,12 @@ impl ClusterSessions { } /// Create new signing session. - pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { + pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option, cluster: Arc, requester_signature: Option) -> Result, Error> { let session_id = SigningSessionId::new(session_id, sub_session_id); let encrypted_data = self.read_key_share(&session_id.id, &cluster)?; let nonce = self.check_session_nonce(&master, nonce)?; - self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), move || SigningSessionImpl::new(SigningSessionParams { + self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || SigningSessionImpl::new(SigningSessionParams { meta: SessionMeta { id: session_id.id, self_node_id: self.self_node_id.clone(), @@ -359,11 +373,11 @@ impl ClusterSessions { } /// Create new share add session. 
- pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { let nonce = self.check_session_nonce(&master, nonce)?; let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; - self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareAddSessionImpl::new(ShareAddSessionParams { + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareAddSessionImpl::new(ShareAddSessionParams { meta: ShareChangeSessionMeta { id: session_id, self_node_id: self.self_node_id.clone(), @@ -389,11 +403,11 @@ impl ClusterSessions { } /// Create new share move session. - pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { let nonce = self.check_session_nonce(&master, nonce)?; let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; - self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareMoveSessionImpl::new(ShareMoveSessionParams { + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareMoveSessionImpl::new(ShareMoveSessionParams { meta: ShareChangeSessionMeta { id: session_id, self_node_id: self.self_node_id.clone(), @@ -419,11 +433,11 @@ impl ClusterSessions { } /// Create new share remove session. 
- pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { let nonce = self.check_session_nonce(&master, nonce)?; let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; - self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams { + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams { meta: ShareChangeSessionMeta { id: session_id, self_node_id: self.self_node_id.clone(), @@ -449,7 +463,7 @@ impl ClusterSessions { } /// Create new servers set change session. - pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { + pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { // TODO: check if there's no other active sessions + do not allow to start other sessions when this session is active let session_id = match session_id { Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID { @@ -462,7 +476,7 @@ impl ClusterSessions { let nonce = self.check_session_nonce(&master, nonce)?; let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; - self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { + self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), true, move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams { meta: ShareChangeSessionMeta { id: session_id, self_node_id: self.self_node_id.clone(), @@ -511,7 +525,7 @@ impl ClusterSessions { } /// Read key 
share && remove disconnected nodes. - fn read_key_share(&self, key_id: &SessionId, cluster: &Arc) -> Result { + fn read_key_share(&self, key_id: &SessionId, cluster: &Arc) -> Result { let mut encrypted_data = self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))?; // some of nodes, which were encrypting secret may be down @@ -540,9 +554,10 @@ impl ClusterSessions { } impl ClusterSessionsContainer where K: Clone + Ord, V: ClusterSession { - pub fn new() -> Self { + pub fn new(container_state: Arc>) -> Self { ClusterSessionsContainer { sessions: RwLock::new(BTreeMap::new()), + container_state: container_state, } } @@ -554,13 +569,18 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster self.sessions.read().get(session_id).map(|s| s.session.clone()) } - pub fn insert Result>(&self, master: NodeId, session_id: K, cluster: Arc, session: F) -> Result, Error> { + pub fn insert Result>(&self, master: NodeId, session_id: K, cluster: Arc, is_exclusive_session: bool, session: F) -> Result, Error> { let mut sessions = self.sessions.write(); if sessions.contains_key(&session_id) { return Err(Error::DuplicateSessionId); } + // create session let session = Arc::new(session()?); + // check if session can be started + self.container_state.lock().on_session_starting(is_exclusive_session)?; + + // insert session let queued_session = QueuedSession { master: master, cluster_view: cluster, @@ -573,7 +593,9 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster } pub fn remove(&self, session_id: &K) { - self.sessions.write().remove(session_id); + if self.sessions.write().remove(session_id).is_some() { + self.container_state.lock().on_session_completed(); + } } pub fn enqueue_message(&self, session_id: &K, sender: NodeId, message: M, is_queued_message: bool) { @@ -621,6 +643,45 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster } } +impl ClusterSessionsContainerState { + /// When session is starting. 
+ pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> { + match *self { + ClusterSessionsContainerState::Idle if is_exclusive_session => { + ::std::mem::replace(self, ClusterSessionsContainerState::Exclusive); + }, + ClusterSessionsContainerState::Idle => { + ::std::mem::replace(self, ClusterSessionsContainerState::Active(1)); + }, + ClusterSessionsContainerState::Active(_) if is_exclusive_session => + return Err(Error::HasActiveSessions), + ClusterSessionsContainerState::Active(sessions_count) => { + ::std::mem::replace(self, ClusterSessionsContainerState::Active(sessions_count + 1)); + }, + ClusterSessionsContainerState::Exclusive => + return Err(Error::ExclusiveSessionActive), + } + Ok(()) + } + + /// When session is completed. + pub fn on_session_completed(&mut self) { + match *self { + ClusterSessionsContainerState::Idle => + unreachable!("idle means that there are no active sessions; on_session_completed is only called once after active session is completed; qed"), + ClusterSessionsContainerState::Active(sessions_count) if sessions_count == 1 => { + ::std::mem::replace(self, ClusterSessionsContainerState::Idle); + }, + ClusterSessionsContainerState::Active(sessions_count) => { + ::std::mem::replace(self, ClusterSessionsContainerState::Active(sessions_count - 1)); + } + ClusterSessionsContainerState::Exclusive => { + ::std::mem::replace(self, ClusterSessionsContainerState::Idle); + }, + } + } +} + impl AdminSession { pub fn as_share_add(&self) -> Option<&ShareAddSessionImpl> { match *self { @@ -841,3 +902,53 @@ impl Drop for AdminSessionWrapper { } } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::collections::BTreeSet; + use ethkey::{Random, Generator}; + use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair}; + use key_server_cluster::cluster::ClusterConfiguration; + use key_server_cluster::cluster::tests::DummyCluster; + use super::ClusterSessions; + + pub fn 
make_cluster_sessions() -> ClusterSessions { + let key_pair = Random.generate().unwrap(); + let config = ClusterConfiguration { + threads: 1, + self_key_pair: Arc::new(PlainNodeKeyPair::new(key_pair.clone())), + listen_address: ("127.0.0.1".to_owned(), 100_u16), + key_server_set: Arc::new(MapKeyServerSet::new(vec![(key_pair.public().clone(), format!("127.0.0.1:{}", 100).parse().unwrap())].into_iter().collect())), + allow_connecting_to_higher_nodes: false, + key_storage: Arc::new(DummyKeyStorage::default()), + acl_storage: Arc::new(DummyAclStorage::default()), + admin_public: Some(Random.generate().unwrap().public().clone()), + }; + ClusterSessions::new(&config) + } + + #[test] + fn cluster_session_cannot_be_started_if_exclusive_session_is_active() { + let sessions = make_cluster_sessions(); + + sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))).unwrap(); + match sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()) { + Err(Error::HasActiveSessions) => (), + Err(e) => unreachable!(format!("{}", e)), + Ok(_) => unreachable!("OK"), + } + } + + #[test] + fn exclusive_session_cannot_be_started_if_other_session_is_active() { + let sessions = make_cluster_sessions(); + + sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()).unwrap(); + match sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))) { + Err(Error::ExclusiveSessionActive) => (), + Err(e) => unreachable!(format!("{}", e)), + Ok(_) => unreachable!("OK"), + } + } +} diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index f83677830..99c53b248 100644 --- a/secret_store/src/key_server_cluster/mod.rs 
+++ b/secret_store/src/key_server_cluster/mod.rs @@ -107,6 +107,10 @@ pub enum Error { ConsensusUnreachable, /// Acl storage error. AccessDenied, + /// Can't start session, because exclusive session is active. + ExclusiveSessionActive, + /// Can't start exclusive session, because there are other active sessions. + HasActiveSessions, } impl From for Error { @@ -152,6 +156,8 @@ impl fmt::Display for Error { Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), Error::ConsensusUnreachable => write!(f, "Consensus unreachable"), Error::AccessDenied => write!(f, "Access denied"), + Error::ExclusiveSessionActive => write!(f, "Exclusive session active"), + Error::HasActiveSessions => write!(f, "Unable to start exclusive session"), } } } @@ -175,7 +181,6 @@ pub use self::client_sessions::decryption_session; pub use self::client_sessions::encryption_session; pub use self::client_sessions::generation_session; pub use self::client_sessions::signing_session; - mod cluster; mod cluster_sessions; mod io; From 309155250431ac349aec886ce8f5e46dbafb4907 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 3 Oct 2017 12:49:43 +0300 Subject: [PATCH 03/22] fix indentation --- secret_store/src/key_server_cluster/cluster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index c9cf30cdb..aecb43e97 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -656,7 +656,7 @@ impl ClusterCore { }, Err(err) => { warn!(target: "secretstore_net", "{}: decryption session error '{}' when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); -let error_message = message::DecryptionSessionError { + let error_message = message::DecryptionSessionError { session: session_id.clone().into(), sub_session: sub_session_id.clone().into(), session_nonce: session_nonce, From 
1b1548f53910e5e02db5853e9a3a940d5d47310e Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 4 Oct 2017 12:16:45 +0300 Subject: [PATCH 04/22] removed obsolete TODO --- secret_store/src/key_server_cluster/cluster_sessions.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index 15c3e0bcb..aa6244dbc 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -464,7 +464,6 @@ impl ClusterSessions { /// Create new servers set change session. pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { - // TODO: check if there's no other active sessions + do not allow to start other sessions when this session is active let session_id = match session_id { Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID { session_id From 4260910db6998b8d74f6b2e03db5d792089a7e18 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 4 Oct 2017 13:15:59 +0200 Subject: [PATCH 05/22] WASM Runtime refactoring (#6596) * refactoring to new pwasm-std * pass reference * remove ref * missing underscores --- ethcore/res/wasm-tests | 2 +- ethcore/wasm/src/call_args.rs | 63 ------------------------------- ethcore/wasm/src/env.rs | 20 ++++++++++ ethcore/wasm/src/lib.rs | 19 ++++------ ethcore/wasm/src/runtime.rs | 71 +++++++++++++++++++++++++---------- ethcore/wasm/src/tests.rs | 40 ++++++++++---------- 6 files changed, 102 insertions(+), 113 deletions(-) delete mode 100644 ethcore/wasm/src/call_args.rs diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index fcac936bf..c8129ce2f 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit fcac936bf68cc271a6a6ac088efb458f3a08f38a +Subproject commit c8129ce2f36c26ed634eda786960978a28e28d0e diff --git 
a/ethcore/wasm/src/call_args.rs b/ethcore/wasm/src/call_args.rs deleted file mode 100644 index 7fb50bff3..000000000 --- a/ethcore/wasm/src/call_args.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Wasm evm call arguments helper - -use bigint::prelude::U256; -use bigint::hash::H160; - -/// Input part of the wasm call descriptor -pub struct CallArgs { - /// Receiver of the transaction - pub address: [u8; 20], - - /// Sender of the transaction - pub sender: [u8; 20], - - /// Original transaction initiator - pub origin: [u8; 20], - - /// Transfer value - pub value: [u8; 32], - - /// call/create params - pub data: Vec, -} - -impl CallArgs { - /// New contract call payload with known parameters - pub fn new(address: H160, sender: H160, origin: H160, value: U256, data: Vec) -> Self { - let mut descriptor = CallArgs { - address: [0u8; 20], - sender: [0u8; 20], - origin: [0u8; 20], - value: [0u8; 32], - data: data, - }; - - descriptor.address.copy_from_slice(&*address); - descriptor.sender.copy_from_slice(&*sender); - descriptor.origin.copy_from_slice(&*origin); - value.to_big_endian(&mut descriptor.value); - - descriptor - } - - /// Total call payload length in linear memory - pub fn len(&self) -> u32 { - self.data.len() as u32 + 92 - } -} diff --git a/ethcore/wasm/src/env.rs 
b/ethcore/wasm/src/env.rs index c32e3ed84..8f4fa264f 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -102,6 +102,26 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[ &[I32], None, ), + Static( + "_sender", + &[I32], + None, + ), + Static( + "_origin", + &[I32], + None, + ), + Static( + "_address", + &[I32], + None, + ), + Static( + "_value", + &[I32], + None, + ), Static( "_timestamp", &[], diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index 8eb14a91a..a68d40d86 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -27,7 +27,6 @@ extern crate wasm_utils; mod runtime; mod ptr; -mod call_args; mod result; #[cfg(test)] mod tests; @@ -107,7 +106,12 @@ impl vm::Vm for WasmInterpreter { env_memory, DEFAULT_STACK_SPACE, params.gas.low_u64(), - RuntimeContext::new(params.address, params.sender), + RuntimeContext { + address: params.address, + sender: params.sender, + origin: params.origin, + value: params.value.value(), + }, &self.program, ); @@ -121,15 +125,8 @@ impl vm::Vm for WasmInterpreter { })? 
); - let d_ptr = runtime.write_descriptor( - call_args::CallArgs::new( - params.address, - params.sender, - params.origin, - params.value.value(), - params.data.unwrap_or(Vec::with_capacity(0)), - ) - ).map_err(|e| Error(e))?; + let d_ptr = runtime.write_descriptor(¶ms.data.unwrap_or_default()) + .map_err(Error)?; { let execution_params = runtime.execution_params() diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index 8376d0f39..5e18ce216 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -28,7 +28,6 @@ use util::Address; use vm::CallType; use super::ptr::{WasmPtr, Error as PtrError}; -use super::call_args::CallArgs; /// User trap in native code #[derive(Debug, Clone, PartialEq)] @@ -97,17 +96,10 @@ impl From for UserTrap { } pub struct RuntimeContext { - address: Address, - sender: Address, -} - -impl RuntimeContext { - pub fn new(address: Address, sender: Address) -> Self { - RuntimeContext { - address: address, - sender: sender, - } - } + pub address: Address, + pub sender: Address, + pub origin: Address, + pub value: U256, } /// Runtime enviroment data for wasm contract execution @@ -442,10 +434,10 @@ impl<'a, 'b> Runtime<'a, 'b> { } /// Write call descriptor to wasm memory - pub fn write_descriptor(&mut self, call_args: CallArgs) -> Result { + pub fn write_descriptor(&mut self, input: &[u8]) -> Result { let d_ptr = self.alloc(16)?; - let args_len = call_args.len(); + let args_len = input.len() as u32; let args_ptr = self.alloc(args_len)?; // write call descriptor @@ -457,11 +449,7 @@ impl<'a, 'b> Runtime<'a, 'b> { self.memory.set(d_ptr, &d_buf)?; // write call args to memory - self.memory.set(args_ptr, &call_args.address)?; - self.memory.set(args_ptr+20, &call_args.sender)?; - self.memory.set(args_ptr+40, &call_args.origin)?; - self.memory.set(args_ptr+60, &call_args.value)?; - self.memory.set(args_ptr+92, &call_args.data)?; + self.memory.set(args_ptr, input)?; Ok(d_ptr.into()) } @@ -559,6 +547,39 @@ impl<'a, 
'b> Runtime<'a, 'b> { Ok(None) } + fn sender(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + self.memory.set(return_ptr, &*self.context.sender)?; + Ok(None) + } + + fn address(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + self.memory.set(return_ptr, &*self.context.address)?; + Ok(None) + } + + fn origin(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + self.memory.set(return_ptr, &*self.context.origin)?; + Ok(None) + } + + fn value(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let return_ptr = context.value_stack.pop_as::()? as u32; + let value: H256 = self.context.value.clone().into(); + self.memory.set(return_ptr, &*value)?; + Ok(None) + } + fn timestamp(&mut self, _context: InterpreterCallerContext) -> Result, InterpreterError> { @@ -691,6 +712,18 @@ impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> { "_gaslimit" => { self.ext_gas_limit(context) }, + "_sender" => { + self.sender(context) + }, + "_address" => { + self.address(context) + }, + "_origin" => { + self.origin(context) + }, + "_value" => { + self.value(context) + }, _ => { trace!(target: "wasm", "Trapped due to unhandled function: '{}'", name); Ok(self.unknown_trap(context)?) 
diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index 22eeb23f5..5a6be6e2a 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -88,7 +88,7 @@ fn logger() { }; println!("ext.store: {:?}", ext.store); - assert_eq!(gas_left, U256::from(98417)); + assert_eq!(gas_left, U256::from(98_731)); let address_val: H256 = address.into(); assert_eq!( ext.store.get(&"0100000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"), @@ -121,6 +121,8 @@ fn logger() { // if it has any result. #[test] fn identity() { + ::ethcore_logger::init_log(); + let code = load_sample!("identity.wasm"); let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); @@ -139,7 +141,7 @@ fn identity() { } }; - assert_eq!(gas_left, U256::from(99_732)); + assert_eq!(gas_left, U256::from(99_812)); assert_eq!( Address::from_slice(&result), @@ -173,7 +175,7 @@ fn dispersion() { } }; - assert_eq!(gas_left, U256::from(99_421)); + assert_eq!(gas_left, U256::from(99_474)); assert_eq!( result, @@ -202,7 +204,7 @@ fn suicide_not() { } }; - assert_eq!(gas_left, U256::from(99_664)); + assert_eq!(gas_left, U256::from(99_691)); assert_eq!( result, @@ -236,7 +238,7 @@ fn suicide() { } }; - assert_eq!(gas_left, U256::from(99_420)); + assert_eq!(gas_left, U256::from(99_490)); assert!(ext.suicides.contains(&refund)); } @@ -267,7 +269,7 @@ fn create() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Create, - gas: U256::from(98_908), + gas: U256::from(99_144), sender_address: None, receive_address: None, value: Some(1_000_000_000.into()), @@ -275,7 +277,7 @@ fn create() { code_address: None, } )); - assert_eq!(gas_left, U256::from(98_860)); + assert_eq!(gas_left, U256::from(99_113)); } @@ -309,7 +311,7 @@ fn call_code() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, - gas: U256::from(99_108), + gas: U256::from(99_138), sender_address: Some(sender), receive_address: 
Some(receiver), value: None, @@ -317,7 +319,7 @@ fn call_code() { code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()), } )); - assert_eq!(gas_left, U256::from(94_241)); + assert_eq!(gas_left, U256::from(94_269)); // siphash result let res = LittleEndian::read_u32(&result[..]); @@ -354,7 +356,7 @@ fn call_static() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, - gas: U256::from(99_108), + gas: U256::from(99_138), sender_address: Some(sender), receive_address: Some(receiver), value: None, @@ -362,7 +364,7 @@ fn call_static() { code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), } )); - assert_eq!(gas_left, U256::from(94_241)); + assert_eq!(gas_left, U256::from(94_269)); // siphash result let res = LittleEndian::read_u32(&result[..]); @@ -388,7 +390,7 @@ fn realloc() { GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), } }; - assert_eq!(gas_left, U256::from(99_562)); + assert_eq!(gas_left, U256::from(99_614)); assert_eq!(result, vec![0u8; 2]); } @@ -414,7 +416,7 @@ fn storage_read() { } }; - assert_eq!(gas_left, U256::from(99_673)); + assert_eq!(gas_left, U256::from(99_695)); assert_eq!(Address::from(&result[12..32]), address); } @@ -441,7 +443,7 @@ fn keccak() { }; assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(gas_left, U256::from(84003)); + assert_eq!(gas_left, U256::from(84_026)); } @@ -495,7 +497,7 @@ fn math_add() { } ).expect("Interpreter to execute without any errors"); - assert_eq!(gas_left, U256::from(98177)); + assert_eq!(gas_left, U256::from(98_241)); assert_eq!( U256::from_dec_str("1888888888888888888888888888887").unwrap(), (&result[..]).into() @@ -517,7 +519,7 @@ fn math_mul() { } ).expect("Interpreter to execute without any errors"); - assert_eq!(gas_left, U256::from(97326)); + assert_eq!(gas_left, U256::from(97_390)); assert_eq!( 
U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), (&result[..]).into() @@ -539,7 +541,7 @@ fn math_sub() { } ).expect("Interpreter to execute without any errors"); - assert_eq!(gas_left, U256::from(98221)); + assert_eq!(gas_left, U256::from(98_285)); assert_eq!( U256::from_dec_str("111111111111111111111111111111").unwrap(), (&result[..]).into() @@ -578,7 +580,7 @@ fn math_div() { } ).expect("Interpreter to execute without any errors"); - assert_eq!(gas_left, U256::from(91_562)); + assert_eq!(gas_left, U256::from(91_574)); assert_eq!( U256::from_dec_str("1125000").unwrap(), (&result[..]).into() @@ -670,5 +672,5 @@ fn externs() { "Gas limit requested and returned does not match" ); - assert_eq!(gas_left, U256::from(95_999)); + assert_eq!(gas_left, U256::from(96_284)); } From b7c2a30d7d2172f457750ea4993963105e710d58 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Wed, 4 Oct 2017 14:58:31 +0200 Subject: [PATCH 06/22] Don't expose port 80 for parity anymore (#6633) --- nsis/installer.nsi | 1 - 1 file changed, 1 deletion(-) diff --git a/nsis/installer.nsi b/nsis/installer.nsi index 5b7940302..25ba98863 100644 --- a/nsis/installer.nsi +++ b/nsis/installer.nsi @@ -116,7 +116,6 @@ section "install" # Firewall exception rules SimpleFC::AdvAddRule "Parity incoming peers (TCP:30303)" "" 6 1 1 2147483647 1 "$INSTDIR\parity.exe" "" "" "Parity" 30303 "" "" "" SimpleFC::AdvAddRule "Parity outgoing peers (TCP:30303)" "" 6 2 1 2147483647 1 "$INSTDIR\parity.exe" "" "" "Parity" "" 30303 "" "" - SimpleFC::AdvAddRule "Parity web queries (TCP:80)" "" 6 2 1 2147483647 1 "$INSTDIR\parity.exe" "" "" "Parity" "" 80 "" "" SimpleFC::AdvAddRule "Parity UDP discovery (UDP:30303)" "" 17 2 1 2147483647 1 "$INSTDIR\parity.exe" "" "" "Parity" "" 30303 "" "" # Registry information for add/remove programs From 6956d218b50fe2d6a0f47f53d259d064bc6e3ff7 Mon Sep 17 00:00:00 2001 From: fro Date: Wed, 4 Oct 2017 19:09:18 +0300 Subject: [PATCH 07/22] address 
balance extern provided for wasm --- ethcore/wasm/src/env.rs | 7 ++++++- ethcore/wasm/src/runtime.rs | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/ethcore/wasm/src/env.rs b/ethcore/wasm/src/env.rs index 8f4fa264f..a09ceea86 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -32,6 +32,11 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[ &[I32; 2], Some(I32), ), + Static( + "_balance", + &[I32; 2], + None, + ), Static( "_malloc", &[I32], @@ -163,4 +168,4 @@ pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserDefined globals: ::std::collections::HashMap::new(), functions: ::std::borrow::Cow::from(SIGNATURES), } -} \ No newline at end of file +} diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index 5e18ce216..fa30d7753 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -163,6 +163,19 @@ impl<'a, 'b> Runtime<'a, 'b> { Ok(Some(0.into())) } + /// Fetches balance for address + pub fn balance(&mut self, context: InterpreterCallerContext) + -> Result, InterpreterError> + { + let mut context = context; + let return_ptr = context.value_stack.pop_as::()? 
as u32; + let address = self.pop_address(&mut context)?; + let balance = self.ext.balance(&address).map_err(|_| UserTrap::BalanceQueryError)?; + let value: H256 = balance.into(); + self.memory.set(return_ptr, &*value)?; + Ok(None) + } + /// Pass suicide to state runtime pub fn suicide(&mut self, context: InterpreterCallerContext) -> Result, InterpreterError> @@ -664,6 +677,9 @@ impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> { "_storage_write" => { self.storage_write(context) }, + "_balance" => { + self.balance(context) + }, "_suicide" => { self.suicide(context) }, From 8961d987a9717393f32fdfbf0b7801e122d55d27 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 4 Oct 2017 22:12:45 +0300 Subject: [PATCH 08/22] fixed port offset in test --- secret_store/src/key_server_cluster/cluster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index aecb43e97..44203efec 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -1634,7 +1634,7 @@ pub mod tests { fn generation_session_completion_signalled_if_failed_on_master() { //::logger::init_log(); let mut core = Core::new().unwrap(); - let clusters = make_clusters(&core, 6023, 3); + let clusters = make_clusters(&core, 6025, 3); run_clusters(&clusters); loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); From e8b418ca03866fd952d456830b30e9225c81035a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 5 Oct 2017 12:35:01 +0200 Subject: [PATCH 09/22] Update jsonrpc dependencies and rewrite dapps to futures. (#6522) * Bump version. * Fix RPC crate. * Fix BoxFuture in crates. * Compiles and passes tests! * Get rid of .boxed() * Fixing issues with the UI. * Remove minihttp. Support threads. * Reimplement files serving to do it in chunks. 
* Increase chunk size. * Remove some unecessary copying. * Fix tests. * Fix stratum warning and ipfs todo. * Switch to proper branch of jsonrpc. * Update Cargo.lock. * Update docs. * Include dapps-glue in workspace. * fixed merge artifacts * Fix test compilation. --- Cargo.lock | 384 ++++++++-------- Cargo.toml | 4 +- dapps/Cargo.toml | 16 +- dapps/js-glue/Cargo.toml | 4 +- dapps/node-health/src/health.rs | 8 +- dapps/node-health/src/time.rs | 10 +- dapps/src/api/api.rs | 162 ++----- dapps/src/api/response.rs | 25 +- dapps/src/apps/cache.rs | 4 +- dapps/src/apps/fetcher/installers.rs | 51 ++- dapps/src/apps/fetcher/mod.rs | 110 ++--- dapps/src/apps/fs.rs | 18 +- dapps/src/apps/mod.rs | 28 +- dapps/src/apps/ui.rs | 18 +- dapps/src/endpoint.rs | 21 +- dapps/src/handlers/async.rs | 112 ----- dapps/src/handlers/content.rs | 60 +-- dapps/src/handlers/echo.rs | 50 +- dapps/src/handlers/fetch.rs | 427 +++++++++--------- dapps/src/handlers/mod.rs | 88 +--- dapps/src/handlers/reader.rs | 73 +++ dapps/src/handlers/redirect.rs | 36 +- dapps/src/handlers/streaming.rs | 94 +--- dapps/src/lib.rs | 50 +- dapps/src/page/builtin.rs | 185 ++++---- dapps/src/page/handler.rs | 271 ++--------- dapps/src/page/local.rs | 182 +++----- dapps/src/page/mod.rs | 8 +- dapps/src/proxypac.rs | 12 +- dapps/src/router.rs | 388 +++++++++------- dapps/src/tests/api.rs | 1 + dapps/src/tests/fetch.rs | 6 +- dapps/src/tests/helpers/fetch.rs | 15 +- dapps/src/tests/helpers/mod.rs | 138 +++--- dapps/src/tests/helpers/registrar.rs | 8 +- dapps/src/tests/home.rs | 62 +++ dapps/src/tests/mod.rs | 1 + dapps/src/tests/redirection.rs | 3 +- dapps/src/tests/validation.rs | 8 +- dapps/src/url.rs | 150 ------ dapps/src/web.rs | 211 +++------ ethcore/Cargo.toml | 8 +- ethcore/native_contracts/generator/src/lib.rs | 11 +- ethcore/src/engines/validator_set/multi.rs | 2 - ethcore/src/lib.rs | 4 - hash-fetch/Cargo.toml | 10 +- hash-fetch/src/lib.rs | 12 +- hash-fetch/src/urlhint.rs | 31 +- hw/src/ledger.rs | 2 +- 
ipfs/Cargo.toml | 5 +- ipfs/src/lib.rs | 264 ++++------- js/Cargo.precompiled.toml | 4 +- js/Cargo.toml | 4 +- logger/src/lib.rs | 1 + parity/cli/mod.rs | 2 +- parity/configuration.rs | 12 +- parity/dapps.rs | 23 +- parity/light_helpers/epoch_fetch.rs | 15 +- parity/light_helpers/queue_cull.rs | 6 +- parity/rpc.rs | 15 +- parity/run.rs | 2 - price-info/src/lib.rs | 7 +- rpc/Cargo.toml | 14 +- rpc/src/http_common.rs | 40 +- rpc/src/lib.rs | 72 +-- rpc/src/tests/rpc.rs | 13 +- rpc/src/v1/extractors.rs | 2 +- rpc/src/v1/helpers/dispatch.rs | 80 ++-- rpc/src/v1/helpers/errors.rs | 4 +- rpc/src/v1/helpers/light_fetch.rs | 62 ++- rpc/src/v1/helpers/oneshot.rs | 4 +- rpc/src/v1/helpers/subscription_manager.rs | 6 +- rpc/src/v1/impls/eth.rs | 79 ++-- rpc/src/v1/impls/eth_filter.rs | 30 +- rpc/src/v1/impls/eth_pubsub.rs | 22 +- rpc/src/v1/impls/light/eth.rs | 129 +++--- rpc/src/v1/impls/light/parity.rs | 29 +- rpc/src/v1/impls/light/parity_set.rs | 4 +- rpc/src/v1/impls/light/trace.rs | 9 +- rpc/src/v1/impls/parity.rs | 52 +-- rpc/src/v1/impls/parity_accounts.rs | 2 +- rpc/src/v1/impls/parity_set.rs | 4 +- rpc/src/v1/impls/personal.rs | 11 +- rpc/src/v1/impls/pubsub.rs | 6 +- rpc/src/v1/impls/signer.rs | 36 +- rpc/src/v1/impls/signing.rs | 47 +- rpc/src/v1/impls/signing_unsafe.rs | 35 +- rpc/src/v1/impls/traces.rs | 23 +- rpc/src/v1/impls/web3.rs | 2 +- rpc/src/v1/informant.rs | 13 +- rpc/src/v1/mod.rs | 2 +- rpc/src/v1/tests/helpers/fetch.rs | 6 +- rpc/src/v1/tests/mocked/signing.rs | 2 +- rpc/src/v1/traits/eth.rs | 34 +- rpc/src/v1/traits/eth_pubsub.rs | 3 +- rpc/src/v1/traits/eth_signing.rs | 3 +- rpc/src/v1/traits/parity.rs | 15 +- rpc/src/v1/traits/parity_set.rs | 5 +- rpc/src/v1/traits/parity_signing.rs | 3 +- rpc/src/v1/traits/personal.rs | 4 +- rpc/src/v1/traits/pubsub.rs | 3 +- rpc/src/v1/traits/signer.rs | 9 +- rpc/src/v1/traits/traces.rs | 5 +- rpc_client/Cargo.toml | 7 +- rpc_client/src/client.rs | 14 +- rpc_client/src/lib.rs | 3 - 
rpc_client/src/signer_client.rs | 3 +- .../src/key_server_cluster/cluster.rs | 2 +- .../src/key_server_cluster/io/deadline.rs | 2 +- stratum/Cargo.toml | 10 +- stratum/src/lib.rs | 27 +- updater/src/updater.rs | 8 +- util/fetch/Cargo.toml | 3 +- util/fetch/src/client.rs | 31 +- util/fetch/src/lib.rs | 3 +- whisper/Cargo.toml | 7 +- whisper/src/lib.rs | 5 +- whisper/src/rpc/mod.rs | 7 +- 118 files changed, 2090 insertions(+), 2908 deletions(-) delete mode 100644 dapps/src/handlers/async.rs create mode 100644 dapps/src/handlers/reader.rs create mode 100644 dapps/src/tests/home.rs delete mode 100644 dapps/src/url.rs diff --git a/Cargo.lock b/Cargo.lock index 13907b3c1..fe53dbe7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -39,11 +39,6 @@ name = "ansi_term" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "antidote" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "app_dirs" version = "1.1.1" @@ -292,6 +287,14 @@ dependencies = [ "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clippy" +version = "0.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "clippy" version = "0.0.103" @@ -309,6 +312,20 @@ dependencies = [ "clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clippy_lints" +version = "0.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "clippy_lints" version = "0.0.103" @@ -553,7 +570,6 @@ name = "ethcore" version = "1.8.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomable 0.1.0", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bn 0.4.4 (git+https://github.com/paritytech/bn)", @@ -561,8 +577,6 @@ dependencies = [ "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.8.0", "ethcore-bigint 0.1.3", "ethcore-bloom-journal 0.1.0", @@ -579,7 +593,7 @@ dependencies = [ "ethkey 0.2.0", "ethstore 0.1.0", "evm 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "hardware-wallet 1.8.0", "hash 0.1.0", "hashdb 0.1.0", @@ -587,7 +601,7 @@ dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "memorydb 0.1.0", @@ -605,7 +619,6 @@ dependencies = [ "rust-crypto 
0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "semantic_version 0.1.0", - "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "table 0.1.0", "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", @@ -735,7 +748,7 @@ dependencies = [ "ethcore-network 1.8.0", "ethcore-util 1.8.0", "evm 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -820,7 +833,7 @@ dependencies = [ "ethcore-util 1.8.0", "ethcrypto 0.1.0", "ethkey 0.2.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -852,16 +865,14 @@ dependencies = [ "ethcore-ipc-nano 1.8.0", "ethcore-logger 1.8.0", "ethcore-util 1.8.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-tcp-server 8.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1077,12 +1088,11 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "reqwest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "reqwest 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1116,7 +1126,7 @@ dependencies = [ [[package]] name = "futures" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1124,7 +1134,7 @@ name = "futures-cpupool" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1269,13 +1279,39 @@ dependencies = [ ] [[package]] -name = "hyper-native-tls" -version = "0.2.4" +name = "hyper" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "antidote 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper-tls" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1363,10 +1399,10 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1375,25 +1411,24 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 2.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-ipc-server" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1401,47 +1436,32 @@ dependencies = [ [[package]] name = "jsonrpc-macros" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "jsonrpc-minihttp-server" -version = "7.0.0" -source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" -dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)", - "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", - "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "jsonrpc-pubsub" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-server-utils" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 
8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1449,25 +1469,23 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-ws-server" -version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" +version = "8.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1533,12 +1551,12 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "linked-hash-map" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1634,17 +1652,6 @@ dependencies = [ "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "mime_guess" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "mime_guess" version = "2.0.0-alpha.2" @@ -1775,7 +1782,7 @@ dependencies = [ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bigint 0.1.3", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "native-contract-generator 0.1.0", ] @@ -1828,7 +1835,7 @@ dependencies = [ "ethcore-io 1.8.0", "ethcore-network 1.8.0", "ethcore-util 1.8.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 
(registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1838,7 +1845,7 @@ dependencies = [ name = "node-health" version = "0.1.0" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2053,12 +2060,12 @@ dependencies = [ "ethkey 0.2.0", "ethsync 1.8.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "node-filter 1.8.0", "node-health 0.1.0", @@ -2105,15 +2112,15 @@ dependencies = [ "ethcore-devtools 1.8.0", "ethcore-util 1.8.0", "fetch 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - 
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)", "node-health 0.1.0", "parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-hash-fetch 1.8.0", @@ -2125,12 +2132,25 @@ dependencies = [ "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "zip 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-dapps-glue" +version = "1.8.0" +dependencies = [ + "aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quasi_macros 
0.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-dapps-glue" version = "1.8.0" @@ -2154,11 +2174,11 @@ dependencies = [ "ethcore-bytes 0.1.0", "ethcore-util 1.8.0", "fetch 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", "parity-reactor 0.1.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2175,10 +2195,11 @@ dependencies = [ "ethcore-bigint 0.1.3", "ethcore-bytes 0.1.0", "ethcore-util 1.8.0", - "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", + "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2208,7 +2229,7 @@ dependencies = [ name = "parity-reactor" version = "0.1.0" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 
(registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2236,18 +2257,16 @@ dependencies = [ "ethstore 0.1.0", "ethsync 1.8.0", "fetch 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "hardware-wallet 1.8.0", "hash 0.1.0", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-ipc-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "node-health 0.1.0", @@ -2275,19 +2294,16 @@ dependencies = [ name = "parity-rpc-client" version = "1.4.0" dependencies = [ - "ethcore-util 1.8.0", - "futures 0.1.14 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "hash 0.1.0", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.8.0", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2297,7 +2313,7 @@ version = "0.1.5" source = "git+https://github.com/nikvolf/parity-tokio-ipc#d6c5b3cfcc913a1b9cf0f0562a10b083ceb9fb7c" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2344,7 +2360,7 @@ dependencies = [ "ethcore-ipc-codegen 1.8.0", "ethcore-util 1.8.0", "ethsync 1.8.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "ipc-common-types 
1.8.0", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-hash-fetch 1.8.0", @@ -2374,11 +2390,10 @@ dependencies = [ "ethcore-network 1.8.0", "ethcrypto 0.1.0", "ethkey 0.2.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", + "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "ordered-float 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2518,7 +2533,7 @@ name = "price-info" version = "1.7.0" dependencies = [ "fetch 0.1.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2662,7 +2677,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2698,16 +2713,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "reqwest" -version = "0.6.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper-native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2802,7 +2823,7 @@ version = "1.4.0" dependencies = [ "bigint 4.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.8.0", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.8.0", "parity-rpc-client 
1.4.0", "rpassword 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3228,7 +3249,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3243,61 +3264,28 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "tokio-minihttp" -version = "0.1.0" -source = "git+https://github.com/tomusdrw/tokio-minihttp#67a400060bd29e51beaf206c552845255b6f699f" -dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", - "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name 
= "tokio-named-pipes" version = "0.1.0" source = "git+https://github.com/nikvolf/tokio-named-pipes#0b9b728eaeb0a6673c287ac7692be398fd651752" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "tokio-proto" -version = "0.1.0" -source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3" -dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "tokio-proto" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3314,7 +3302,7 @@ name = "tokio-service" 
version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3322,17 +3310,28 @@ name = "tokio-timer" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-tls" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-uds" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3622,7 +3621,6 @@ dependencies = [ "checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a" "checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" 
"checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6" -"checksum antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" "checksum app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d1c0d48a81bbb13043847f957971f4d87c81542d80ece5e84ba3cba4058fd4" "checksum arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "699e63a93b79d717e8c3b5eb1b28b7780d0d6d9e59a72eb769291c83b0c8dc67" "checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0" @@ -3653,8 +3651,10 @@ dependencies = [ "checksum clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" "checksum clippy 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "5ad3f3dc94d81a6505eb28bf545b501fc9d7525ee9864df5a4b2b6d82629f038" +"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b" "checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a" "checksum clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "c058b299bb1289c7e8c063bd49477715c91cb3c3344bcf2e25326860b0675654" +"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96" "checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd" "checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" "checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591" @@ -3682,7 +3682,7 @@ dependencies = [ "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" "checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d" "checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" -"checksum futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4b63a4792d4f8f686defe3b39b92127fea6344de5d38202b2ee5a11bbbf29d6a" +"checksum futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "05a23db7bd162d4e8265968602930c476f688f0c180b44bdaf55e0cb2c687558" "checksum futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "77d49e7de8b91b20d6fda43eea906637eff18b96702eb6b2872df8bfab1ad2b5" "checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" "checksum getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9" @@ -3696,7 +3696,8 @@ dependencies = [ "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" "checksum hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = 
"368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2" -"checksum hyper-native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "72332e4a35d3059583623b50e98e491b78f8b96c5521fcb3f428167955aa56e8" +"checksum hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "641abc3e3fcf0de41165595f801376e01106bca1fd876dda937730e477ca004c" +"checksum hyper-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c81fa95203e2a6087242c38691a0210f23e9f3f8f944350bd676522132e2985" "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" "checksum igd 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "356a0dc23a4fa0f8ce4777258085d00a01ea4923b2efd93538fc44bf5e1bda76" "checksum integer-encoding 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a053c9c7dcb7db1f2aa012c37dc176c62e4cdf14898dee0eecc606de835b8acb" @@ -3706,15 +3707,14 @@ dependencies = [ "checksum itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4833d6978da405305126af4ac88569b5d71ff758581ce5a987dbfa3755f694fc" "checksum itertools 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ab4d6a273c31ef276c917019239588b23bc696f277af8db10742cba3c27ec2f0" "checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" -"checksum jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-minihttp-server 7.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" -"checksum jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-ipc-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-tcp-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" +"checksum jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" @@ -3723,8 +3723,8 @@ dependencies = [ "checksum libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a2aa04ec0100812d31a5366130ff9e793291787bc31da845bede4a00ea329830" "checksum libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)" = "" "checksum libusb-sys 0.2.3 (git+https://github.com/paritytech/libusb-sys)" = "" -"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" "checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939" +"checksum linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2aab0478615bb586559b0114d94dd8eca4fdbb73b443adcb0d00b61692b4bf" "checksum local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1ceb20f39ff7ae42f3ff9795f3986b1daad821caaa1e1732a0944103a5a1a66" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" "checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21" @@ -3735,7 +3735,6 @@ dependencies = [ "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e3d709ffbb330e1566dc2f2a3c9b58a5ad4a381f740b810cd305dc3f089bc160" -"checksum mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bbee1a836f344ac39d4a59bfe7be2bd3150353ff71678afb740216f8270b333e" "checksum mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)" = "27a5e6679a0614e25adc14c6434ba84e41632b765a6d9cb2031a0cca682699ae" "checksum miniz-sys 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4" "checksum mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dbd91d3bfbceb13897065e97b2ef177a09a438cb33612b2d371bf568819a9313" @@ -3805,7 +3804,7 @@ dependencies = [ "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum reqwest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1d56dbe269dbe19d716b76ec8c3efce8ef84e974f5b7e5527463e8c0507d4e17" +"checksum reqwest 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5866613d84e2a39c0479a960bf2d0eff1fbfc934f02cd42b5c08c1e1efc5b1fd" "checksum ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "24293de46bac74c9b9c05b40ff8496bbc8b9ae242a9b89f754e1154a43bc7c4c" "checksum rocksdb 0.4.5 (git+https://github.com/paritytech/rust-rocksdb)" = "" "checksum rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)" = "" @@ -3866,12 +3865,11 @@ dependencies = [ "checksum tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d52d12ad79e4063e0cb0ca5efa202ed7244b6ce4d25f4d3abe410b2a66128292" "checksum tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e85d419699ec4b71bfe35bbc25bb8771e52eff0471a7f75c853ad06e200b4f86" "checksum tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af" -"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "" "checksum tokio-named-pipes 
0.1.0 (git+https://github.com/nikvolf/tokio-named-pipes)" = "" -"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "" "checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc" +"checksum tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d88e411cac1c87e405e4090be004493c5d8072a370661033b1a64ea205ec2e13" "checksum tokio-uds 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6116c71be48f8f1656551fd16458247fdd6c03201d7893ad81189055fcde03e8" "checksum toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "0590d72182e50e879c4da3b11c6488dae18fccb1ae0c7a3eda18e16795844796" "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" diff --git a/Cargo.toml b/Cargo.toml index 68a288072..65f8aed15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ futures-cpupool = "0.1" fdlimit = "0.1" ws2_32-sys = "0.2" ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } @@ -117,4 +117,4 @@ lto = false panic = "abort" [workspace] -members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec", "dapps/node-health"] +members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec", "dapps/js-glue"] diff --git 
a/dapps/Cargo.toml b/dapps/Cargo.toml index cb384d011..ae3216ba3 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -10,25 +10,23 @@ authors = ["Parity Technologies "] [dependencies] base32 = "0.3" futures = "0.1" -linked-hash-map = "0.3" +futures-cpupool = "0.1" +linked-hash-map = "0.5" log = "0.3" -parity-dapps-glue = "1.7" +parity-dapps-glue = "1.8" parking_lot = "0.4" -mime = "0.2" -mime_guess = "1.6.1" +mime_guess = "2.0.0-alpha.2" rand = "0.3" rustc-hex = "1.0" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" -time = "0.1.35" -unicase = "1.3" -url = "1.0" +unicase = "1.4" zip = { version = "0.1", default-features = false } itertools = "0.5" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } ethcore-util = { path = "../util" } ethcore-bigint = { path = "../util/bigint" } diff --git a/dapps/js-glue/Cargo.toml b/dapps/js-glue/Cargo.toml index 0f4c92968..126bca143 100644 --- a/dapps/js-glue/Cargo.toml +++ b/dapps/js-glue/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Base Package for all Parity built-in dapps" name = "parity-dapps-glue" -version = "1.7.0" +version = "1.8.0" license = "GPL-3.0" authors = ["Parity Technologies "] build = "build.rs" @@ -12,7 +12,7 @@ syntex = { version = "0.58", optional = true } [dependencies] glob = { version = "0.2.11" } -mime_guess = { version = "1.6.1" } +mime_guess = { version = "2.0.0-alpha.2" } aster = { version = "0.41", default-features = false } quasi = { version = "0.32", default-features = false } quasi_macros = { version = "0.32", optional = true } diff --git a/dapps/node-health/src/health.rs b/dapps/node-health/src/health.rs index 3b3563d6b..ec53d2e29 100644 --- 
a/dapps/node-health/src/health.rs +++ b/dapps/node-health/src/health.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::time; -use futures::{Future, BoxFuture}; +use futures::Future; use futures::sync::oneshot; use types::{HealthInfo, HealthStatus, Health}; use time::{TimeChecker, MAX_DRIFT}; @@ -44,7 +44,7 @@ impl NodeHealth { } /// Query latest health report. - pub fn health(&self) -> BoxFuture { + pub fn health(&self) -> Box + Send> { trace!(target: "dapps", "Checking node health."); // Check timediff let sync_status = self.sync_status.clone(); @@ -63,7 +63,7 @@ impl NodeHealth { }, ); - rx.map_err(|err| { + Box::new(rx.map_err(|err| { warn!(target: "dapps", "Health request cancelled: {:?}", err); }).and_then(move |time| { // Check peers @@ -117,6 +117,6 @@ impl NodeHealth { }; Ok(Health { peers, sync, time}) - }).boxed() + })) } } diff --git a/dapps/node-health/src/time.rs b/dapps/node-health/src/time.rs index 05c48ce47..78f1fdd5f 100644 --- a/dapps/node-health/src/time.rs +++ b/dapps/node-health/src/time.rs @@ -37,7 +37,7 @@ use std::collections::VecDeque; use std::sync::atomic::{self, AtomicUsize}; use std::sync::Arc; -use futures::{self, Future, BoxFuture}; +use futures::{self, Future}; use futures::future::{self, IntoFuture}; use futures_cpupool::{CpuPool, CpuFuture}; use ntp; @@ -195,6 +195,8 @@ const UPDATE_TIMEOUT_INCOMPLETE_SECS: u64 = 10; /// Maximal valid time drift. pub const MAX_DRIFT: i64 = 500; +type BoxFuture = Box + Send>; + #[derive(Debug, Clone)] /// A time checker. 
pub struct TimeChecker { @@ -224,7 +226,7 @@ impl TimeChecker where ::Future: Send + 'sta pub fn update(&self) -> BoxFuture { trace!(target: "dapps", "Updating time from NTP."); let last_result = self.last_result.clone(); - self.ntp.drift().into_future().then(move |res| { + Box::new(self.ntp.drift().into_future().then(move |res| { let res = res.map(|d| d.num_milliseconds()); if let Err(Error::NoServersAvailable) = res { @@ -255,7 +257,7 @@ impl TimeChecker where ::Future: Send + 'sta let res = select_result(results.iter()); *last_result.write() = (valid_till, results); res - }).boxed() + })) } /// Returns a current time drift or error if last request to NTP server failed. @@ -264,7 +266,7 @@ impl TimeChecker where ::Future: Send + 'sta { let res = self.last_result.read(); if res.0 > time::Instant::now() { - return futures::done(select_result(res.1.iter())).boxed(); + return Box::new(futures::done(select_result(res.1.iter()))); } } // or update and return result diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 03ba859f8..a9f9af293 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -16,144 +16,82 @@ use std::sync::Arc; -use hyper::{server, net, Decoder, Encoder, Next, Control}; -use hyper::method::Method; -use hyper::status::StatusCode; +use hyper::{Method, StatusCode}; -use api::{response, types}; +use api::response; use apps::fetcher::Fetcher; -use handlers::{self, extract_url}; -use endpoint::{Endpoint, Handler, EndpointPath}; -use node_health::{NodeHealth, HealthStatus, Health}; -use parity_reactor::Remote; +use endpoint::{Endpoint, Request, Response, EndpointPath}; +use futures::{future, Future}; +use node_health::{NodeHealth, HealthStatus}; #[derive(Clone)] pub struct RestApi { fetcher: Arc, health: NodeHealth, - remote: Remote, +} + +impl Endpoint for RestApi { + fn respond(&self, mut path: EndpointPath, req: Request) -> Response { + if let Method::Options = *req.method() { + return Box::new(future::ok(response::empty())); + } + + 
let endpoint = path.app_params.get(0).map(String::to_owned); + let hash = path.app_params.get(1).map(String::to_owned); + + // at this point path.app_id contains 'api', adjust it to the hash properly, otherwise + // we will try and retrieve 'api' as the hash when doing the /api/content route + if let Some(ref hash) = hash { + path.app_id = hash.to_owned(); + } + + trace!(target: "dapps", "Handling /api request: {:?}/{:?}", endpoint, hash); + match endpoint.as_ref().map(String::as_str) { + Some("ping") => Box::new(future::ok(response::ping(req))), + Some("health") => self.health(), + Some("content") => self.resolve_content(hash.as_ref().map(String::as_str), path, req), + _ => Box::new(future::ok(response::not_found())), + } + } } impl RestApi { pub fn new( fetcher: Arc, health: NodeHealth, - remote: Remote, ) -> Box { Box::new(RestApi { fetcher, health, - remote, }) } -} -impl Endpoint for RestApi { - fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { - Box::new(RestApiRouter::new((*self).clone(), path, control)) - } -} - -struct RestApiRouter { - api: RestApi, - path: Option, - control: Option, - handler: Box, -} - -impl RestApiRouter { - fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { - RestApiRouter { - path: Some(path), - control: Some(control), - api: api, - handler: Box::new(response::as_json_error(StatusCode::NotFound, &types::ApiError { - code: "404".into(), - title: "Not Found".into(), - detail: "Resource you requested has not been found.".into(), - })), - } - } - - fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option> { + fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, req: Request) -> Response { trace!(target: "dapps", "Resolving content: {:?} from path: {:?}", hash, path); match hash { - Some(hash) if self.api.fetcher.contains(hash) => { - Some(self.api.fetcher.to_async_handler(path, control)) + Some(hash) if self.fetcher.contains(hash) => { + 
self.fetcher.respond(path, req) }, - _ => None + _ => Box::new(future::ok(response::not_found())), } } - fn health(&self, control: Control) -> Box { - let map = move |health: Result, ()>| { - let status = match health { - Ok(Ok(ref health)) => { - if [&health.peers.status, &health.sync.status].iter().any(|x| *x != &HealthStatus::Ok) { - StatusCode::PreconditionFailed // HTTP 412 - } else { - StatusCode::Ok // HTTP 200 - } - }, - _ => StatusCode::ServiceUnavailable, // HTTP 503 - }; + fn health(&self) -> Response { + Box::new(self.health.health() + .then(|health| { + let status = match health { + Ok(ref health) => { + if [&health.peers.status, &health.sync.status].iter().any(|x| *x != &HealthStatus::Ok) { + StatusCode::PreconditionFailed // HTTP 412 + } else { + StatusCode::Ok // HTTP 200 + } + }, + _ => StatusCode::ServiceUnavailable, // HTTP 503 + }; - response::as_json(status, &health) - }; - let health = self.api.health.health(); - let remote = self.api.remote.clone(); - Box::new(handlers::AsyncHandler::new(health, map, remote, control)) - } -} - -impl server::Handler for RestApiRouter { - fn on_request(&mut self, request: server::Request) -> Next { - if let Method::Options = *request.method() { - self.handler = response::empty(); - return Next::write(); - } - - // TODO [ToDr] Consider using `path.app_params` instead - let url = extract_url(&request); - if url.is_none() { - // Just return 404 if we can't parse URL - return Next::write(); - } - - let url = url.expect("Check for None early-exists above; qed"); - let mut path = self.path.take().expect("on_request called only once, and path is always defined in new; qed"); - let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed"); - - let endpoint = url.path.get(1).map(|v| v.as_str()); - let hash = url.path.get(2).map(|v| v.as_str()); - // at this point path.app_id contains 'api', adjust it to the hash properly, otherwise - // we will try and retrieve 'api' 
as the hash when doing the /api/content route - if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() } - - let handler = endpoint.and_then(|v| match v { - "ping" => Some(response::ping()), - "health" => Some(self.health(control)), - "content" => self.resolve_content(hash, path, control), - _ => None - }); - - // Overwrite default - if let Some(h) = handler { - self.handler = h; - } - - self.handler.on_request(request) - } - - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - self.handler.on_request_readable(decoder) - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - self.handler.on_response(res) - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - self.handler.on_response_writable(encoder) + Ok(response::as_json(status, &health).into()) + }) + ) } } diff --git a/dapps/src/api/response.rs b/dapps/src/api/response.rs index 6ecc2df60..c8d25c144 100644 --- a/dapps/src/api/response.rs +++ b/dapps/src/api/response.rs @@ -16,27 +16,28 @@ use serde::Serialize; use serde_json; -use hyper::status::StatusCode; +use hyper::{self, mime, StatusCode}; -use endpoint::Handler; use handlers::{ContentHandler, EchoHandler}; -pub fn empty() -> Box { - Box::new(ContentHandler::ok("".into(), mime!(Text/Plain))) +pub fn empty() -> hyper::Response { + ContentHandler::ok("".into(), mime::TEXT_PLAIN).into() } -pub fn as_json(status: StatusCode, val: &T) -> ContentHandler { +pub fn as_json(status: StatusCode, val: &T) -> hyper::Response { let json = serde_json::to_string(val) .expect("serialization to string is infallible; qed"); - ContentHandler::new(status, json, mime!(Application/Json)) + ContentHandler::new(status, json, mime::APPLICATION_JSON).into() } -pub fn as_json_error(status: StatusCode, val: &T) -> ContentHandler { - let json = serde_json::to_string(val) - .expect("serialization to string is infallible; qed"); - ContentHandler::new(status, json, mime!(Application/Json)) +pub fn ping(req: 
hyper::Request) -> hyper::Response { + EchoHandler::new(req).into() } -pub fn ping() -> Box { - Box::new(EchoHandler::default()) +pub fn not_found() -> hyper::Response { + as_json(StatusCode::NotFound, &::api::types::ApiError { + code: "404".into(), + title: "Not Found".into(), + detail: "Resource you requested has not been found.".into(), + }) } diff --git a/dapps/src/apps/cache.rs b/dapps/src/apps/cache.rs index 5bc01da8e..c81d4d9af 100644 --- a/dapps/src/apps/cache.rs +++ b/dapps/src/apps/cache.rs @@ -19,12 +19,12 @@ use std::fs; use linked_hash_map::LinkedHashMap; -use page::LocalPageEndpoint; +use page::local; use handlers::FetchControl; pub enum ContentStatus { Fetching(FetchControl), - Ready(LocalPageEndpoint), + Ready(local::Dapp), } #[derive(Default)] diff --git a/dapps/src/apps/fetcher/installers.rs b/dapps/src/apps/fetcher/installers.rs index cb7fa1671..11491f72f 100644 --- a/dapps/src/apps/fetcher/installers.rs +++ b/dapps/src/apps/fetcher/installers.rs @@ -18,16 +18,17 @@ use zip; use std::{fs, fmt}; use std::io::{self, Read, Write}; use std::path::PathBuf; -use fetch::{self, Mime}; -use hash::keccak_buffer; use bigint::hash::H256; +use fetch::{self, Mime}; +use futures_cpupool::CpuPool; +use hash::keccak_buffer; -use page::{LocalPageEndpoint, PageCache}; -use handlers::{ContentValidator, ValidatorResponse}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; +use handlers::{ContentValidator, ValidatorResponse}; +use page::{local, PageCache}; use Embeddable; -type OnDone = Box) + Send>; +type OnDone = Box) + Send>; fn write_response_and_check_hash( id: &str, @@ -75,15 +76,17 @@ pub struct Content { mime: Mime, content_path: PathBuf, on_done: OnDone, + pool: CpuPool, } impl Content { - pub fn new(id: String, mime: Mime, content_path: PathBuf, on_done: OnDone) -> Self { + pub fn new(id: String, mime: Mime, content_path: PathBuf, on_done: OnDone, pool: CpuPool) -> Self { Content { - id: id, - mime: mime, - 
content_path: content_path, - on_done: on_done, + id, + mime, + content_path, + on_done, + pool, } } } @@ -91,12 +94,15 @@ impl Content { impl ContentValidator for Content { type Error = ValidationError; - fn validate_and_install(&self, response: fetch::Response) -> Result { - let validate = |content_path: PathBuf| { + fn validate_and_install(self, response: fetch::Response) -> Result { + let pool = self.pool; + let id = self.id.clone(); + let mime = self.mime; + let validate = move |content_path: PathBuf| { // Create dir - let (_, content_path) = write_response_and_check_hash(self.id.as_str(), content_path.clone(), self.id.as_str(), response)?; + let (_, content_path) = write_response_and_check_hash(&id, content_path, &id, response)?; - Ok(LocalPageEndpoint::single_file(content_path, self.mime.clone(), PageCache::Enabled)) + Ok(local::Dapp::single_file(pool, content_path, mime, PageCache::Enabled)) }; // Prepare path for a file @@ -118,15 +124,17 @@ pub struct Dapp { dapps_path: PathBuf, on_done: OnDone, embeddable_on: Embeddable, + pool: CpuPool, } impl Dapp { - pub fn new(id: String, dapps_path: PathBuf, on_done: OnDone, embeddable_on: Embeddable) -> Self { + pub fn new(id: String, dapps_path: PathBuf, on_done: OnDone, embeddable_on: Embeddable, pool: CpuPool) -> Self { Dapp { id, dapps_path, on_done, embeddable_on, + pool, } } @@ -158,16 +166,19 @@ impl Dapp { impl ContentValidator for Dapp { type Error = ValidationError; - fn validate_and_install(&self, response: fetch::Response) -> Result { - let validate = |dapp_path: PathBuf| { - let (file, zip_path) = write_response_and_check_hash(self.id.as_str(), dapp_path.clone(), &format!("{}.zip", self.id), response)?; + fn validate_and_install(self, response: fetch::Response) -> Result { + let id = self.id.clone(); + let pool = self.pool; + let embeddable_on = self.embeddable_on; + let validate = move |dapp_path: PathBuf| { + let (file, zip_path) = write_response_and_check_hash(&id, dapp_path.clone(), 
&format!("{}.zip", id), response)?; trace!(target: "dapps", "Opening dapp bundle at {:?}", zip_path); // Unpack archive let mut zip = zip::ZipArchive::new(file)?; // First find manifest file let (mut manifest, manifest_dir) = Self::find_manifest(&mut zip)?; // Overwrite id to match hash - manifest.id = self.id.clone(); + manifest.id = id; // Unpack zip for i in 0..zip.len() { @@ -198,7 +209,7 @@ impl ContentValidator for Dapp { let mut manifest_file = fs::File::create(manifest_path)?; manifest_file.write_all(manifest_str.as_bytes())?; // Create endpoint - let endpoint = LocalPageEndpoint::new(dapp_path, manifest.clone().into(), PageCache::Enabled, self.embeddable_on.clone()); + let endpoint = local::Dapp::new(pool, dapp_path, manifest.into(), PageCache::Enabled, embeddable_on); Ok(endpoint) }; diff --git a/dapps/src/apps/fetcher/mod.rs b/dapps/src/apps/fetcher/mod.rs index 8a11f7a9b..effcc19cd 100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -24,27 +24,25 @@ use std::{fs, env}; use std::path::PathBuf; use std::sync::Arc; use rustc_hex::FromHex; +use futures::{future, Future}; +use futures_cpupool::CpuPool; use fetch::{Client as FetchClient, Fetch}; use hash_fetch::urlhint::{URLHintContract, URLHint, URLHintResult}; -use parity_reactor::Remote; -use hyper; -use hyper::status::StatusCode; +use hyper::StatusCode; use {Embeddable, SyncStatus, random_filename}; use parking_lot::Mutex; -use page::LocalPageEndpoint; +use page::local; use handlers::{ContentHandler, ContentFetcherHandler}; -use endpoint::{Endpoint, EndpointPath, Handler}; +use endpoint::{self, Endpoint, EndpointPath}; use apps::cache::{ContentCache, ContentStatus}; /// Limit of cached dapps/content const MAX_CACHED_DAPPS: usize = 20; -pub trait Fetcher: Send + Sync + 'static { +pub trait Fetcher: Endpoint + 'static { fn contains(&self, content_id: &str) -> bool; - - fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box; } pub struct ContentFetcher { 
@@ -53,8 +51,8 @@ pub struct ContentFetcher>, sync: Arc, embeddable_on: Embeddable, - remote: Remote, fetch: F, + pool: CpuPool, only_content: bool, } @@ -66,24 +64,23 @@ impl Drop for ContentFetcher { } impl ContentFetcher { - pub fn new( resolver: R, - sync_status: Arc, - remote: Remote, + sync: Arc, fetch: F, + pool: CpuPool, ) -> Self { let mut cache_path = env::temp_dir(); cache_path.push(random_filename()); ContentFetcher { - cache_path: cache_path, - resolver: resolver, - sync: sync_status, + cache_path, + resolver, + sync, cache: Arc::new(Mutex::new(ContentCache::default())), embeddable_on: None, - remote: remote, - fetch: fetch, + fetch, + pool, only_content: true, } } @@ -98,24 +95,34 @@ impl ContentFetcher { self } - fn still_syncing(embeddable: Embeddable) -> Box { - Box::new(ContentHandler::error( + fn not_found(embeddable: Embeddable) -> endpoint::Response { + Box::new(future::ok(ContentHandler::error( + StatusCode::NotFound, + "Resource Not Found", + "Requested resource was not found.", + None, + embeddable, + ).into())) + } + + fn still_syncing(embeddable: Embeddable) -> endpoint::Response { + Box::new(future::ok(ContentHandler::error( StatusCode::ServiceUnavailable, "Sync In Progress", "Your node is still syncing. We cannot resolve any content before it's fully synced.", Some("Refresh"), embeddable, - )) + ).into())) } - fn dapps_disabled(address: Embeddable) -> Box { - Box::new(ContentHandler::error( + fn dapps_disabled(address: Embeddable) -> endpoint::Response { + Box::new(future::ok(ContentHandler::error( StatusCode::ServiceUnavailable, "Network Dapps Not Available", "This interface doesn't support network dapps for security reasons.", None, address, - )) + ).into())) } #[cfg(test)] @@ -126,8 +133,6 @@ impl ContentFetcher { // resolve contract call synchronously. // TODO: port to futures-based hyper and make it all async. 
fn resolve(&self, content_id: Vec) -> Option { - use futures::Future; - self.resolver.resolve(content_id) .wait() .unwrap_or_else(|e| { warn!("Error resolving content-id: {}", e); None }) @@ -151,8 +156,10 @@ impl Fetcher for ContentFetcher { false } } +} - fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box { +impl Endpoint for ContentFetcher { + fn respond(&self, path: EndpointPath, req: endpoint::Request) -> endpoint::Response { let mut cache = self.cache.lock(); let content_id = path.app_id.clone(); @@ -161,12 +168,12 @@ impl Fetcher for ContentFetcher { match status { // Just serve the content Some(&mut ContentStatus::Ready(ref endpoint)) => { - (None, endpoint.to_async_handler(path, control)) + (None, endpoint.to_response(&path)) }, // Content is already being fetched Some(&mut ContentStatus::Fetching(ref fetch_control)) if !fetch_control.is_deadline_reached() => { trace!(target: "dapps", "Content fetching in progress. Waiting..."); - (None, fetch_control.to_async_handler(path, control)) + (None, fetch_control.to_response(path)) }, // We need to start fetching the content _ => { @@ -176,7 +183,7 @@ impl Fetcher for ContentFetcher { let cache = self.cache.clone(); let id = content_id.clone(); - let on_done = move |result: Option| { + let on_done = move |result: Option| { let mut cache = cache.lock(); match result { Some(endpoint) => cache.insert(id.clone(), ContentStatus::Ready(endpoint)), @@ -195,39 +202,39 @@ impl Fetcher for ContentFetcher { }, Some(URLHintResult::Dapp(dapp)) => { let handler = ContentFetcherHandler::new( - dapp.url(), + req.method(), + &dapp.url(), path, - control, installers::Dapp::new( content_id.clone(), self.cache_path.clone(), Box::new(on_done), self.embeddable_on.clone(), + self.pool.clone(), ), self.embeddable_on.clone(), - self.remote.clone(), self.fetch.clone(), ); - (Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as Box) + 
(Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as endpoint::Response) }, Some(URLHintResult::Content(content)) => { let handler = ContentFetcherHandler::new( - content.url, + req.method(), + &content.url, path, - control, installers::Content::new( content_id.clone(), content.mime, self.cache_path.clone(), Box::new(on_done), + self.pool.clone(), ), self.embeddable_on.clone(), - self.remote.clone(), self.fetch.clone(), ); - (Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as Box) + (Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as endpoint::Response) }, None if self.sync.is_major_importing() => { (None, Self::still_syncing(self.embeddable_on.clone())) @@ -235,13 +242,7 @@ impl Fetcher for ContentFetcher { None => { // This may happen when sync status changes in between // `contains` and `to_handler` - (None, Box::new(ContentHandler::error( - StatusCode::NotFound, - "Resource Not Found", - "Requested resource was not found.", - None, - self.embeddable_on.clone(), - )) as Box) + (None, Self::not_found(self.embeddable_on.clone())) }, } }, @@ -263,13 +264,12 @@ mod tests { use std::sync::Arc; use bytes::Bytes; use fetch::{Fetch, Client}; - use futures::{future, Future, BoxFuture}; - use hash_fetch::urlhint::{URLHint, URLHintResult}; - use parity_reactor::Remote; + use futures::future; + use hash_fetch::urlhint::{URLHint, URLHintResult, BoxFuture}; use apps::cache::ContentStatus; use endpoint::EndpointInfo; - use page::LocalPageEndpoint; + use page::local; use super::{ContentFetcher, Fetcher}; use {SyncStatus}; @@ -277,7 +277,7 @@ mod tests { struct FakeResolver; impl URLHint for FakeResolver { fn resolve(&self, _id: Bytes) -> BoxFuture, String> { - future::ok(None).boxed() + Box::new(future::ok(None)) } } @@ -291,10 +291,16 @@ mod tests { #[test] fn should_true_if_contains_the_app() { // given + let pool = ::futures_cpupool::CpuPool::new(1); let path = env::temp_dir(); - let fetcher = 
ContentFetcher::new(FakeResolver, Arc::new(FakeSync(false)), Remote::new_sync(), Client::new().unwrap()) - .allow_dapps(true); - let handler = LocalPageEndpoint::new(path, EndpointInfo { + let fetcher = ContentFetcher::new( + FakeResolver, + Arc::new(FakeSync(false)), + Client::new().unwrap(), + pool.clone(), + ).allow_dapps(true); + + let handler = local::Dapp::new(pool, path, EndpointInfo { name: "fake".into(), description: "".into(), version: "".into(), diff --git a/dapps/src/apps/fs.rs b/dapps/src/apps/fs.rs index eb9f65130..8be2dcf58 100644 --- a/dapps/src/apps/fs.rs +++ b/dapps/src/apps/fs.rs @@ -19,9 +19,11 @@ use std::io; use std::io::Read; use std::fs; use std::path::{Path, PathBuf}; -use page::{LocalPageEndpoint, PageCache}; -use endpoint::{Endpoint, EndpointInfo}; +use futures_cpupool::CpuPool; + use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest}; +use endpoint::{Endpoint, EndpointInfo}; +use page::{local, PageCache}; use Embeddable; struct LocalDapp { @@ -61,14 +63,14 @@ fn read_manifest(name: &str, mut path: PathBuf) -> EndpointInfo { /// Returns Dapp Id and Local Dapp Endpoint for given filesystem path. /// Parses the path to extract last component (for name). /// `None` is returned when path is invalid or non-existent. 
-pub fn local_endpoint>(path: P, embeddable: Embeddable) -> Option<(String, Box)> { +pub fn local_endpoint>(path: P, embeddable: Embeddable, pool: CpuPool) -> Option<(String, Box)> { let path = path.as_ref().to_owned(); path.canonicalize().ok().and_then(|path| { let name = path.file_name().and_then(|name| name.to_str()); name.map(|name| { let dapp = local_dapp(name.into(), path.clone()); - (dapp.id, Box::new(LocalPageEndpoint::new( - dapp.path, dapp.info, PageCache::Disabled, embeddable.clone()) + (dapp.id, Box::new(local::Dapp::new( + pool.clone(), dapp.path, dapp.info, PageCache::Disabled, embeddable.clone()) )) }) }) @@ -86,13 +88,13 @@ fn local_dapp(name: String, path: PathBuf) -> LocalDapp { } /// Returns endpoints for Local Dapps found for given filesystem path. -/// Scans the directory and collects `LocalPageEndpoints`. -pub fn local_endpoints>(dapps_path: P, embeddable: Embeddable) -> BTreeMap> { +/// Scans the directory and collects `local::Dapp`. +pub fn local_endpoints>(dapps_path: P, embeddable: Embeddable, pool: CpuPool) -> BTreeMap> { let mut pages = BTreeMap::>::new(); for dapp in local_dapps(dapps_path.as_ref()) { pages.insert( dapp.id, - Box::new(LocalPageEndpoint::new(dapp.path, dapp.info, PageCache::Disabled, embeddable.clone())) + Box::new(local::Dapp::new(pool.clone(), dapp.path, dapp.info, PageCache::Disabled, embeddable.clone())) ); } pages diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index c38c6784a..8467a8e71 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -18,12 +18,12 @@ use std::path::PathBuf; use std::sync::Arc; use endpoint::{Endpoints, Endpoint}; -use page::PageEndpoint; +use futures_cpupool::CpuPool; +use page; use proxypac::ProxyPac; use web::Web; use fetch::Fetch; use parity_dapps::WebApp; -use parity_reactor::Remote; use parity_ui; use {WebProxyTokens, ParentFrameSettings}; @@ -43,12 +43,12 @@ pub const UTILS_PATH: &'static str = "parity-utils"; pub const WEB_PATH: &'static str = "web"; pub const 
URL_REFERER: &'static str = "__referer="; -pub fn utils() -> Box { - Box::new(PageEndpoint::with_prefix(parity_ui::App::default(), UTILS_PATH.to_owned())) +pub fn utils(pool: CpuPool) -> Box { + Box::new(page::builtin::Dapp::new(pool, parity_ui::App::default())) } -pub fn ui() -> Box { - Box::new(PageEndpoint::with_fallback_to_index(parity_ui::App::default())) +pub fn ui(pool: CpuPool) -> Box { + Box::new(page::builtin::Dapp::with_fallback_to_index(pool, parity_ui::App::default())) } pub fn ui_redirection(embeddable: Option) -> Box { @@ -61,14 +61,14 @@ pub fn all_endpoints( dapps_domain: &str, embeddable: Option, web_proxy_tokens: Arc, - remote: Remote, fetch: F, + pool: CpuPool, ) -> (Vec, Endpoints) { // fetch fs dapps at first to avoid overwriting builtins - let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone()); + let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone(), pool.clone()); let local_endpoints: Vec = pages.keys().cloned().collect(); for path in extra_dapps { - if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone()) { + if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone(), pool.clone()) { pages.insert(id, endpoint); } else { warn!(target: "dapps", "Ignoring invalid dapp at {}", path.display()); @@ -76,17 +76,17 @@ pub fn all_endpoints( } // NOTE [ToDr] Dapps will be currently embeded on 8180 - insert::(&mut pages, "ui", Embeddable::Yes(embeddable.clone())); + insert::(&mut pages, "ui", Embeddable::Yes(embeddable.clone()), pool.clone()); pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned())); - pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone())); + pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), fetch.clone())); (local_endpoints, pages) } -fn insert(pages: &mut Endpoints, id: &str, embed_at: Embeddable) { +fn 
insert(pages: &mut Endpoints, id: &str, embed_at: Embeddable, pool: CpuPool) { pages.insert(id.to_owned(), Box::new(match embed_at { - Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address), - Embeddable::No => PageEndpoint::new(T::default()), + Embeddable::Yes(address) => page::builtin::Dapp::new_safe_to_embed(pool, T::default(), address), + Embeddable::No => page::builtin::Dapp::new(pool, T::default()), })); } diff --git a/dapps/src/apps/ui.rs b/dapps/src/apps/ui.rs index 06a815a8a..39da14e5b 100644 --- a/dapps/src/apps/ui.rs +++ b/dapps/src/apps/ui.rs @@ -16,9 +16,10 @@ //! UI redirections -use hyper::{Control, StatusCode}; +use hyper::StatusCode; +use futures::future; -use endpoint::{Endpoint, Handler, EndpointPath}; +use endpoint::{Endpoint, Request, Response, EndpointPath}; use {handlers, Embeddable}; /// Redirection to UI server. @@ -37,19 +38,20 @@ impl Redirection { } impl Endpoint for Redirection { - fn to_async_handler(&self, _path: EndpointPath, _control: Control) -> Box { - if let Some(ref frame) = self.embeddable_on { + fn respond(&self, _path: EndpointPath, req: Request) -> Response { + Box::new(future::ok(if let Some(ref frame) = self.embeddable_on { trace!(target: "dapps", "Redirecting to signer interface."); - handlers::Redirection::boxed(&format!("http://{}:{}", &frame.host, frame.port)) + let protocol = req.uri().scheme().unwrap_or("http"); + handlers::Redirection::new(format!("{}://{}:{}", protocol, &frame.host, frame.port)).into() } else { trace!(target: "dapps", "Signer disabled, returning 404."); - Box::new(handlers::ContentHandler::error( + handlers::ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Your homepage is not available when Trusted Signer is disabled.", Some("You can still access dapps by writing a correct address, though. 
Re-enable Signer to get your homepage back."), None, - )) - } + ).into() + })) } } diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index ea5825b74..c612ad5b8 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -18,17 +18,25 @@ use std::collections::BTreeMap; -use hyper::{self, server, net}; +use jsonrpc_core::BoxFuture; +use hyper; #[derive(Debug, PartialEq, Default, Clone)] pub struct EndpointPath { pub app_id: String, pub app_params: Vec, + pub query: Option, pub host: String, pub port: u16, pub using_dapps_domains: bool, } +impl EndpointPath { + pub fn has_no_params(&self) -> bool { + self.app_params.is_empty() || self.app_params.iter().all(|x| x.is_empty()) + } +} + #[derive(Debug, PartialEq, Clone)] pub struct EndpointInfo { pub name: String, @@ -39,16 +47,11 @@ pub struct EndpointInfo { } pub type Endpoints = BTreeMap>; -pub type Handler = server::Handler + Send; +pub type Response = BoxFuture; +pub type Request = hyper::Request; pub trait Endpoint : Send + Sync { fn info(&self) -> Option<&EndpointInfo> { None } - fn to_handler(&self, _path: EndpointPath) -> Box { - panic!("This Endpoint is asynchronous and requires Control object."); - } - - fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box { - self.to_handler(path) - } + fn respond(&self, path: EndpointPath, req: Request) -> Response; } diff --git a/dapps/src/handlers/async.rs b/dapps/src/handlers/async.rs deleted file mode 100644 index d68c55cce..000000000 --- a/dapps/src/handlers/async.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Async Content Handler -//! Temporary solution until we switch to future-based server. -//! Wraps a future and converts it to hyper::server::Handler; - -use std::{mem, time}; -use std::sync::mpsc; -use futures::Future; -use hyper::{server, Decoder, Encoder, Next, Control}; -use hyper::net::HttpStream; - -use handlers::ContentHandler; -use parity_reactor::Remote; - -const TIMEOUT_SECS: u64 = 15; - -enum State { - Initial(F, M, Remote, Control), - Waiting(mpsc::Receiver>, M), - Done(ContentHandler), - Invalid, -} - -pub struct AsyncHandler { - state: State, -} - -impl AsyncHandler { - pub fn new(future: F, map: M, remote: Remote, control: Control) -> Self { - AsyncHandler { - state: State::Initial(future, map, remote, control), - } - } -} - -impl server::Handler for AsyncHandler, M> where - F: Future + Send + 'static, - M: FnOnce(Result, ()>) -> ContentHandler, - T: Send + 'static, - E: Send + 'static, -{ - fn on_request(&mut self, _request: server::Request) -> Next { - if let State::Initial(future, map, remote, control) = mem::replace(&mut self.state, State::Invalid) { - let (tx, rx) = mpsc::sync_channel(1); - let control2 = control.clone(); - let tx2 = tx.clone(); - remote.spawn_with_timeout(move || future.then(move |result| { - // Send a result (ignore errors if the connection was dropped) - let _ = tx.send(Ok(result)); - // Resume handler - let _ = control.ready(Next::read()); - - Ok(()) - }), time::Duration::from_secs(TIMEOUT_SECS), move || { - // Notify about error - let _ = tx2.send(Err(())); - // Resume handler - let _ = control2.ready(Next::read()); - }); - - self.state = State::Waiting(rx, map); - } - - 
Next::wait() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - if let State::Waiting(rx, map) = mem::replace(&mut self.state, State::Invalid) { - match rx.try_recv() { - Ok(result) => { - self.state = State::Done(map(result)); - }, - Err(err) => { - warn!("Resuming handler in incorrect state: {:?}", err); - } - } - } - - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - if let State::Done(ref mut handler) = self.state { - handler.on_response(res) - } else { - Next::end() - } - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - if let State::Done(ref mut handler) = self.state { - handler.on_response_writable(encoder) - } else { - Next::end() - } - } -} diff --git a/dapps/src/handlers/content.rs b/dapps/src/handlers/content.rs index 300f4b61a..13d1fb0b9 100644 --- a/dapps/src/handlers/content.rs +++ b/dapps/src/handlers/content.rs @@ -16,32 +16,29 @@ //! Simple Content Handler -use hyper::{header, server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::mime::Mime; -use hyper::status::StatusCode; +use hyper::{self, mime, header}; +use hyper::StatusCode; use util::version; use handlers::add_security_headers; use Embeddable; -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ContentHandler { code: StatusCode, content: String, - mimetype: Mime, - write_pos: usize, + mimetype: mime::Mime, safe_to_embed_on: Embeddable, } impl ContentHandler { - pub fn ok(content: String, mimetype: Mime) -> Self { + pub fn ok(content: String, mimetype: mime::Mime) -> Self { Self::new(StatusCode::Ok, content, mimetype) } pub fn html(code: StatusCode, content: String, embeddable_on: Embeddable) -> Self { - Self::new_embeddable(code, content, mime!(Text/Html), embeddable_on) + Self::new_embeddable(code, content, mime::TEXT_HTML, embeddable_on) } pub fn error( @@ -60,57 +57,32 @@ impl ContentHandler { ), embeddable_on) } - pub fn new(code: StatusCode, content: String, mimetype: Mime) 
-> Self { + pub fn new(code: StatusCode, content: String, mimetype: mime::Mime) -> Self { Self::new_embeddable(code, content, mimetype, None) } pub fn new_embeddable( code: StatusCode, content: String, - mimetype: Mime, + mimetype: mime::Mime, safe_to_embed_on: Embeddable, ) -> Self { ContentHandler { code, content, mimetype, - write_pos: 0, safe_to_embed_on, } } } -impl server::Handler for ContentHandler { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(self.code); - res.headers_mut().set(header::ContentType(self.mimetype.clone())); - add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take()); - Next::write() - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - let bytes = self.content.as_bytes(); - if self.write_pos == bytes.len() { - return Next::end(); - } - - match encoder.write(&bytes[self.write_pos..]) { - Ok(bytes) => { - self.write_pos += bytes; - Next::write() - }, - Err(e) => match e.kind() { - ::std::io::ErrorKind::WouldBlock => Next::write(), - _ => Next::end() - }, - } +impl Into for ContentHandler { + fn into(self) -> hyper::Response { + let mut res = hyper::Response::new() + .with_status(self.code) + .with_header(header::ContentType(self.mimetype)) + .with_body(self.content); + add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on); + res } } diff --git a/dapps/src/handlers/echo.rs b/dapps/src/handlers/echo.rs index 1e00f2eec..6ebac3d35 100644 --- a/dapps/src/handlers/echo.rs +++ b/dapps/src/handlers/echo.rs @@ -16,45 +16,31 @@ //! 
Echo Handler -use std::io::Read; -use hyper::{server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use super::ContentHandler; +use hyper::{self, header}; -#[derive(Default)] +use handlers::add_security_headers; + +#[derive(Debug)] pub struct EchoHandler { - content: String, - handler: Option, + request: hyper::Request, } -impl server::Handler for EchoHandler { - fn on_request(&mut self, _: server::Request) -> Next { - Next::read() - } - - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - match decoder.read_to_string(&mut self.content) { - Ok(0) => { - self.handler = Some(ContentHandler::ok(self.content.clone(), mime!(Application/Json))); - Next::write() - }, - Ok(_) => Next::read(), - Err(e) => match e.kind() { - ::std::io::ErrorKind::WouldBlock => Next::read(), - _ => Next::end(), - } +impl EchoHandler { + pub fn new(request: hyper::Request) -> Self { + EchoHandler { + request, } } +} - fn on_response(&mut self, res: &mut server::Response) -> Next { - self.handler.as_mut() - .expect("handler always set in on_request, which is before now; qed") - .on_response(res) - } +impl Into for EchoHandler { + fn into(self) -> hyper::Response { + let content_type = self.request.headers().get().cloned(); + let mut res = hyper::Response::new() + .with_header(content_type.unwrap_or(header::ContentType::json())) + .with_body(self.request.body()); - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - self.handler.as_mut() - .expect("handler always set in on_request, which is before now; qed") - .on_response_writable(encoder) + add_security_headers(res.headers_mut(), None); + res } } diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index afe1f5083..27429bd01 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -16,57 +16,39 @@ //! Hyper Server Handler that fetches a file during a request (proxy). 
-use std::fmt; -use std::sync::{mpsc, Arc}; +use std::{fmt, mem}; +use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Instant, Duration}; use fetch::{self, Fetch}; -use futures::Future; -use parity_reactor::Remote; +use futures::sync::oneshot; +use futures::{self, Future}; +use hyper::{self, Method, StatusCode}; +use jsonrpc_core::BoxFuture; use parking_lot::Mutex; -use hyper::{server, Decoder, Encoder, Next, Method, Control}; -use hyper::net::HttpStream; -use hyper::uri::RequestUri; -use hyper::status::StatusCode; - -use endpoint::EndpointPath; +use endpoint::{self, EndpointPath}; use handlers::{ContentHandler, StreamingHandler}; -use page::{LocalPageEndpoint, PageHandlerWaiting}; +use page::local; use {Embeddable}; const FETCH_TIMEOUT: u64 = 300; pub enum ValidatorResponse { - Local(LocalPageEndpoint), + Local(local::Dapp), Streaming(StreamingHandler), } -pub trait ContentValidator: Send + 'static { +pub trait ContentValidator: Sized + Send + 'static { type Error: fmt::Debug + fmt::Display; - fn validate_and_install(&self, fetch::Response) -> Result; + fn validate_and_install(self, fetch::Response) -> Result; } -enum FetchState { - Waiting, - NotStarted(String), - Error(ContentHandler), - InProgress(mpsc::Receiver), - Streaming(StreamingHandler), - Done(LocalPageEndpoint, Box), -} - -enum WaitResult { - Error(ContentHandler), - Done(LocalPageEndpoint), - NonAwaitable, -} - -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct FetchControl { abort: Arc, - listeners: Arc)>>>, + listeners: Arc>>>, deadline: Instant, } @@ -81,14 +63,30 @@ impl Default for FetchControl { } impl FetchControl { + pub fn is_deadline_reached(&self) -> bool { + self.deadline < Instant::now() + } + + pub fn abort(&self) { + self.abort.store(true, Ordering::SeqCst); + } + + pub fn to_response(&self, path: EndpointPath) -> endpoint::Response { + let (tx, receiver) = oneshot::channel(); + self.listeners.lock().push(tx); + + Box::new(WaitingHandler { + path, + 
state: WaitState::Waiting(receiver), + }) + } + fn notify WaitResult>(&self, status: F) { let mut listeners = self.listeners.lock(); - for (control, sender) in listeners.drain(..) { + for sender in listeners.drain(..) { trace!(target: "dapps", "Resuming request waiting for content..."); - if let Err(e) = sender.send(status()) { - trace!(target: "dapps", "Waiting listener notification failed: {:?}", e); - } else { - let _ = control.ready(Next::read()); + if let Err(_) = sender.send(status()) { + trace!(target: "dapps", "Waiting listener notification failed."); } } } @@ -98,92 +96,79 @@ impl FetchControl { FetchState::Error(ref handler) => self.notify(|| WaitResult::Error(handler.clone())), FetchState::Done(ref endpoint, _) => self.notify(|| WaitResult::Done(endpoint.clone())), FetchState::Streaming(_) => self.notify(|| WaitResult::NonAwaitable), - FetchState::NotStarted(_) | FetchState::InProgress(_) | FetchState::Waiting => {}, + FetchState::InProgress(_) => {}, + FetchState::Empty => {}, } } +} - pub fn is_deadline_reached(&self) -> bool { - self.deadline < Instant::now() - } - pub fn abort(&self) { - self.abort.store(true, Ordering::SeqCst); - } +enum WaitState { + Waiting(oneshot::Receiver), + Done(endpoint::Response), +} - pub fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box + Send> { - let (tx, rx) = mpsc::channel(); - self.listeners.lock().push((control, tx)); - - Box::new(WaitingHandler { - receiver: rx, - state: FetchState::Waiting, - uri: RequestUri::default(), - path: path, - }) - } +#[derive(Debug)] +enum WaitResult { + Error(ContentHandler), + Done(local::Dapp), + NonAwaitable, } pub struct WaitingHandler { - receiver: mpsc::Receiver, - state: FetchState, - uri: RequestUri, path: EndpointPath, + state: WaitState, } -impl server::Handler for WaitingHandler { - fn on_request(&mut self, request: server::Request) -> Next { - self.uri = request.uri().clone(); - Next::wait() - } +impl Future for WaitingHandler { + type Item = 
hyper::Response; + type Error = hyper::Error; - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - let result = self.receiver.try_recv().ok(); - self.state = match result { - Some(WaitResult::Error(handler)) => FetchState::Error(handler), - Some(WaitResult::Done(endpoint)) => { - let mut page_handler = endpoint.to_page_handler(self.path.clone()); - page_handler.set_uri(&self.uri); - FetchState::Done(endpoint, page_handler) - }, - _ => { - warn!("A result for waiting request was not received."); - FetchState::Waiting - }, - }; + fn poll(&mut self) -> futures::Poll { + loop { + let new_state = match self.state { + WaitState::Waiting(ref mut receiver) => { + let result = try_ready!(receiver.poll().map_err(|_| hyper::Error::Timeout)); - match self.state { - FetchState::Done(_, ref mut handler) => handler.on_request_readable(decoder), - FetchState::Streaming(ref mut handler) => handler.on_request_readable(decoder), - FetchState::Error(ref mut handler) => handler.on_request_readable(decoder), - _ => Next::write(), - } - } + match result { + WaitResult::Error(handler) => { + return Ok(futures::Async::Ready(handler.into())); + }, + WaitResult::NonAwaitable => { + let errors = Errors { embeddable_on: None }; + return Ok(futures::Async::Ready(errors.streaming().into())); + }, + WaitResult::Done(endpoint) => { + WaitState::Done(endpoint.to_response(&self.path).into()) + }, + } + }, + WaitState::Done(ref mut response) => { + return response.poll() + }, + }; - fn on_response(&mut self, res: &mut server::Response) -> Next { - match self.state { - FetchState::Done(_, ref mut handler) => handler.on_response(res), - FetchState::Streaming(ref mut handler) => handler.on_response(res), - FetchState::Error(ref mut handler) => handler.on_response(res), - _ => Next::end(), - } - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - match self.state { - FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder), - 
FetchState::Streaming(ref mut handler) => handler.on_response_writable(encoder), - FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), - _ => Next::end(), + self.state = new_state; } } } -#[derive(Clone)] +#[derive(Debug, Clone)] struct Errors { embeddable_on: Embeddable, } impl Errors { + fn streaming(&self) -> ContentHandler { + ContentHandler::error( + StatusCode::BadGateway, + "Streaming Error", + "This content is being streamed in other place.", + None, + self.embeddable_on.clone(), + ) + } + fn download_error(&self, e: E) -> ContentHandler { ContentHandler::error( StatusCode::BadGateway, @@ -225,67 +210,102 @@ impl Errors { } } -pub struct ContentFetcherHandler { +enum FetchState { + Error(ContentHandler), + InProgress(BoxFuture), + Streaming(hyper::Response), + Done(local::Dapp, endpoint::Response), + Empty, +} + +impl fmt::Debug for FetchState { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::FetchState::*; + + write!(fmt, "FetchState(")?; + match *self { + Error(ref error) => write!(fmt, "error: {:?}", error), + InProgress(_) => write!(fmt, "in progress"), + Streaming(ref res) => write!(fmt, "streaming: {:?}", res), + Done(ref endpoint, _) => write!(fmt, "done: {:?}", endpoint), + Empty => write!(fmt, "?"), + }?; + write!(fmt, ")") + } +} + +#[derive(Debug)] +pub struct ContentFetcherHandler { fetch_control: FetchControl, - control: Control, - remote: Remote, status: FetchState, - fetch: F, - installer: Option, - path: EndpointPath, errors: Errors, } -impl ContentFetcherHandler { - pub fn new( - url: String, - path: EndpointPath, - control: Control, - installer: H, - embeddable_on: Embeddable, - remote: Remote, - fetch: F, - ) -> Self { - ContentFetcherHandler { - fetch_control: FetchControl::default(), - control, - remote, - fetch, - status: FetchState::NotStarted(url), - installer: Some(installer), - path, - errors: Errors { - embeddable_on, - }, - } - } - +impl ContentFetcherHandler { pub fn 
fetch_control(&self) -> FetchControl { self.fetch_control.clone() } - fn fetch_content(&self, uri: RequestUri, url: &str, installer: H) -> mpsc::Receiver { - let (tx, rx) = mpsc::channel(); - let abort = self.fetch_control.abort.clone(); + pub fn new( + method: &hyper::Method, + url: &str, + path: EndpointPath, + installer: H, + embeddable_on: Embeddable, + fetch: F, + ) -> Self { + let fetch_control = FetchControl::default(); + let errors = Errors { embeddable_on }; - let path = self.path.clone(); - let tx2 = tx.clone(); - let control = self.control.clone(); - let errors = self.errors.clone(); + // Validation of method + let status = match *method { + // Start fetching content + Method::Get => { + trace!(target: "dapps", "Fetching content from: {:?}", url); + FetchState::InProgress(Self::fetch_content( + fetch, + url, + fetch_control.abort.clone(), + path, + errors.clone(), + installer, + )) + }, + // or return error + _ => FetchState::Error(errors.method_not_allowed()), + }; - let future = self.fetch.fetch_with_abort(url, abort.into()).then(move |result| { + ContentFetcherHandler { + fetch_control, + status, + errors, + } + } + + fn fetch_content( + fetch: F, + url: &str, + abort: Arc, + path: EndpointPath, + errors: Errors, + installer: H, + ) -> BoxFuture { + // Start fetching the content + let fetch2 = fetch.clone(); + let future = fetch.fetch_with_abort(url, abort.into()).then(move |result| { trace!(target: "dapps", "Fetching content finished. Starting validation: {:?}", result); - let new_state = match result { + Ok(match result { Ok(response) => match installer.validate_and_install(response) { Ok(ValidatorResponse::Local(endpoint)) => { trace!(target: "dapps", "Validation OK. 
Returning response."); - let mut handler = endpoint.to_page_handler(path); - handler.set_uri(&uri); - FetchState::Done(endpoint, handler) + let response = endpoint.to_response(&path); + FetchState::Done(endpoint, response) }, - Ok(ValidatorResponse::Streaming(handler)) => { + Ok(ValidatorResponse::Streaming(stream)) => { trace!(target: "dapps", "Validation OK. Streaming response."); - FetchState::Streaming(handler) + let (reading, response) = stream.into_response(); + fetch2.process_and_forget(reading); + FetchState::Streaming(response) }, Err(e) => { trace!(target: "dapps", "Error while validating content: {:?}", e); @@ -296,100 +316,55 @@ impl ContentFetcherHandler { warn!(target: "dapps", "Unable to fetch content: {:?}", e); FetchState::Error(errors.download_error(e)) }, - }; - // Content may be resolved when the connection is already dropped. - let _ = tx2.send(new_state); - // Ignoring control errors - let _ = control.ready(Next::read()); - Ok(()) as Result<(), ()> + }) }); // make sure to run within fetch thread pool. 
- let future = self.fetch.process(future); - // spawn to event loop - let control = self.control.clone(); - let errors = self.errors.clone(); - self.remote.spawn_with_timeout(|| future, Duration::from_secs(FETCH_TIMEOUT), move || { - // Notify about the timeout - let _ = tx.send(FetchState::Error(errors.timeout_error())); - // Ignoring control errors - let _ = control.ready(Next::read()); - }); - - rx + fetch.process(future) } } -impl server::Handler for ContentFetcherHandler { - fn on_request(&mut self, request: server::Request) -> Next { - let status = if let FetchState::NotStarted(ref url) = self.status { - let uri = request.uri().clone(); - let installer = self.installer.take().expect("Installer always set initialy; installer used only in on_request; on_request invoked only once; qed"); +impl Future for ContentFetcherHandler { + type Item = hyper::Response; + type Error = hyper::Error; - Some(match *request.method() { - // Start fetching content - Method::Get => { - trace!(target: "dapps", "Fetching content from: {:?}", url); - let receiver = self.fetch_content(uri, url, installer); - FetchState::InProgress(receiver) + fn poll(&mut self) -> futures::Poll { + loop { + trace!(target: "dapps", "Polling status: {:?}", self.status); + self.status = match mem::replace(&mut self.status, FetchState::Empty) { + FetchState::Error(error) => { + return Ok(futures::Async::Ready(error.into())); }, - // or return error - _ => FetchState::Error(self.errors.method_not_allowed()), - }) - } else { None }; + FetchState::Streaming(response) => { + return Ok(futures::Async::Ready(response)); + }, + any => any, + }; - if let Some(status) = status { + let status = match self.status { + // Request may time out + FetchState::InProgress(_) if self.fetch_control.is_deadline_reached() => { + trace!(target: "dapps", "Fetching dapp failed because of timeout."); + FetchState::Error(self.errors.timeout_error()) + }, + FetchState::InProgress(ref mut receiver) => { + // Check if there is a 
response + trace!(target: "dapps", "Polling streaming response."); + try_ready!(receiver.poll().map_err(|err| { + warn!(target: "dapps", "Error while fetching response: {:?}", err); + hyper::Error::Timeout + })) + }, + FetchState::Done(_, ref mut response) => { + return response.poll() + }, + FetchState::Empty => panic!("Future polled twice."), + _ => unreachable!(), + }; + + trace!(target: "dapps", "New status: {:?}", status); self.fetch_control.set_status(&status); self.status = status; } - - Next::read() - } - - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - let (status, next) = match self.status { - // Request may time out - FetchState::InProgress(_) if self.fetch_control.is_deadline_reached() => { - trace!(target: "dapps", "Fetching dapp failed because of timeout."); - (Some(FetchState::Error(self.errors.timeout_error())), Next::write()) - }, - FetchState::InProgress(ref receiver) => { - // Check if there is an answer - let rec = receiver.try_recv(); - match rec { - // just return the new state - Ok(state) => (Some(state), Next::write()), - // wait some more - _ => (None, Next::wait()) - } - }, - FetchState::Error(ref mut handler) => (None, handler.on_request_readable(decoder)), - _ => (None, Next::write()), - }; - - if let Some(status) = status { - self.fetch_control.set_status(&status); - self.status = status; - } - - next - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - match self.status { - FetchState::Done(_, ref mut handler) => handler.on_response(res), - FetchState::Streaming(ref mut handler) => handler.on_response(res), - FetchState::Error(ref mut handler) => handler.on_response(res), - _ => Next::end(), - } - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - match self.status { - FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder), - FetchState::Streaming(ref mut handler) => handler.on_response_writable(encoder), - FetchState::Error(ref mut handler) => 
handler.on_response_writable(encoder), - _ => Next::end(), - } } } diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index a8beabe84..485000f11 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -16,80 +16,79 @@ //! Hyper handlers implementations. -mod async; mod content; mod echo; mod fetch; +mod reader; mod redirect; mod streaming; -pub use self::async::AsyncHandler; pub use self::content::ContentHandler; pub use self::echo::EchoHandler; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse}; +pub use self::reader::Reader; pub use self::redirect::Redirection; pub use self::streaming::StreamingHandler; use std::iter; use itertools::Itertools; -use url::Url; -use hyper::{server, header, net, uri}; +use hyper::header; use {apps, address, Embeddable}; /// Adds security-related headers to the Response. pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embeddable) { - headers.set_raw("X-XSS-Protection", vec![b"1; mode=block".to_vec()]); - headers.set_raw("X-Content-Type-Options", vec![b"nosniff".to_vec()]); + headers.set_raw("X-XSS-Protection", "1; mode=block"); + headers.set_raw("X-Content-Type-Options", "nosniff"); // Embedding header: if let None = embeddable_on { - headers.set_raw("X-Frame-Options", vec![b"SAMEORIGIN".to_vec()]); + headers.set_raw("X-Frame-Options", "SAMEORIGIN"); } // Content Security Policy headers - headers.set_raw("Content-Security-Policy", vec![ + headers.set_raw("Content-Security-Policy", String::new() // Allow connecting to WS servers and HTTP(S) servers. // We could be more restrictive and allow only RPC server URL. - b"connect-src http: https: ws: wss:;".to_vec(), + + "connect-src http: https: ws: wss:;" // Allow framing any content from HTTP(S). // Again we could only allow embedding from RPC server URL. 
// (deprecated) - b"frame-src 'self' http: https:;".to_vec(), + + "frame-src 'self' http: https:;" // Allow framing and web workers from HTTP(S). - b"child-src 'self' http: https:;".to_vec(), + + "child-src 'self' http: https:;" // We allow data: blob: and HTTP(s) images. // We could get rid of wildcarding HTTP and only allow RPC server URL. // (http required for local dapps icons) - b"img-src 'self' 'unsafe-inline' data: blob: http: https:;".to_vec(), + + "img-src 'self' 'unsafe-inline' data: blob: http: https:;" // Allow style from data: blob: and HTTPS. - b"style-src 'self' 'unsafe-inline' data: blob: https:;".to_vec(), + + "style-src 'self' 'unsafe-inline' data: blob: https:;" // Allow fonts from data: and HTTPS. - b"font-src 'self' data: https:;".to_vec(), + + "font-src 'self' data: https:;" // Allow inline scripts and scripts eval (webpack/jsconsole) - { + + { let script_src = embeddable_on.as_ref() .map(|e| e.extra_script_src.iter() .map(|&(ref host, port)| address(host, port)) .join(" ") ).unwrap_or_default(); - format!( + &format!( "script-src 'self' 'unsafe-inline' 'unsafe-eval' {};", script_src - ).into_bytes() - }, + ) + } // Same restrictions as script-src with additional // blob: that is required for camera access (worker) - b"worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;".to_vec(), + + "worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;" // Restrict everything else to the same origin. 
- b"default-src 'self';".to_vec(), + + "default-src 'self';" // Run in sandbox mode (although it's not fully safe since we allow same-origin and script) - b"sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;".to_vec(), + + "sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;" // Disallow subitting forms from any dapps - b"form-action 'none';".to_vec(), + + "form-action 'none';" // Never allow mixed content - b"block-all-mixed-content;".to_vec(), + + "block-all-mixed-content;" // Specify if the site can be embedded. - match embeddable_on { + + &match embeddable_on { Some(ref embed) => { let std = address(&embed.host, embed.port); let proxy = format!("{}.{}", apps::HOME_PAGE, embed.dapps_domain); @@ -112,43 +111,6 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd format!("frame-ancestors {};", ancestors) }, None => format!("frame-ancestors 'self';"), - }.into_bytes(), - ]); -} - - -/// Extracts URL part from the Request. -pub fn extract_url(req: &server::Request) -> Option { - convert_uri_to_url(req.uri(), req.headers().get::()) -} - -/// Extracts URL given URI and Host header. 
-pub fn convert_uri_to_url(uri: &uri::RequestUri, host: Option<&header::Host>) -> Option { - match *uri { - uri::RequestUri::AbsoluteUri(ref url) => { - match Url::from_generic_url(url.clone()) { - Ok(url) => Some(url), - _ => None, - } - }, - uri::RequestUri::AbsolutePath { ref path, ref query } => { - let query = match *query { - Some(ref query) => format!("?{}", query), - None => "".into(), - }; - // Attempt to prepend the Host header (mandatory in HTTP/1.1) - let url_string = match host { - Some(ref host) => { - format!("http://{}:{}{}{}", host.hostname, host.port.unwrap_or(80), path, query) - }, - None => return None, - }; - - match Url::parse(&url_string) { - Ok(url) => Some(url), - _ => None, - } - }, - _ => None, - } + } + ); } diff --git a/dapps/src/handlers/reader.rs b/dapps/src/handlers/reader.rs new file mode 100644 index 000000000..85a351c7b --- /dev/null +++ b/dapps/src/handlers/reader.rs @@ -0,0 +1,73 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! A chunk-producing io::Read wrapper. + +use std::io::{self, Read}; + +use futures::{self, sink, Sink, Future}; +use futures::sync::mpsc; +use hyper; + +type Sender = mpsc::Sender>; + +const MAX_CHUNK_SIZE: usize = 32 * 1024; + +/// A Reader is essentially a stream of `hyper::Chunks`. +/// The chunks are read from given `io::Read` instance. 
+/// +/// Unfortunately `hyper` doesn't allow you to pass `Stream` +/// directly to the response, so you need to create +/// a `Body::pair()` and send over chunks using `sink::Send`. +/// Also `Chunks` need to take `Vec` by value, so we need +/// to allocate it for each chunk being sent. +pub struct Reader { + buffer: [u8; MAX_CHUNK_SIZE], + content: io::BufReader, + sending: sink::Send, +} + +impl Reader { + pub fn pair(content: R, initial: Vec) -> (Self, hyper::Body) { + let (tx, rx) = hyper::Body::pair(); + let reader = Reader { + buffer: [0; MAX_CHUNK_SIZE], + content: io::BufReader::new(content), + sending: tx.send(Ok(initial.into())), + }; + + (reader, rx) + } +} + +impl Future for Reader { + type Item = (); + type Error = (); + + fn poll(&mut self) -> futures::Poll { + loop { + let next = try_ready!(self.sending.poll().map_err(|err| { + warn!(target: "dapps", "Unable to send next chunk: {:?}", err); + })); + + self.sending = match self.content.read(&mut self.buffer) { + Ok(0) => return Ok(futures::Async::Ready(())), + Ok(read) => next.send(Ok(self.buffer[..read].to_vec().into())), + Err(err) => next.send(Err(hyper::Error::Io(err))), + } + } + } +} diff --git a/dapps/src/handlers/redirect.rs b/dapps/src/handlers/redirect.rs index 09e8f9c50..cb1eda2dd 100644 --- a/dapps/src/handlers/redirect.rs +++ b/dapps/src/handlers/redirect.rs @@ -16,9 +16,7 @@ //! 
HTTP Redirection hyper handler -use hyper::{header, server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::status::StatusCode; +use hyper::{self, header, StatusCode}; #[derive(Clone)] pub struct Redirection { @@ -26,36 +24,18 @@ pub struct Redirection { } impl Redirection { - pub fn new(url: &str) -> Self { + pub fn new>(url: T) -> Self { Redirection { - to_url: url.to_owned() + to_url: url.into() } } - - pub fn boxed(url: &str) -> Box { - Box::new(Self::new(url)) - } } -impl server::Handler for Redirection { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { +impl Into for Redirection { + fn into(self) -> hyper::Response { // Don't use `MovedPermanently` here to prevent browser from caching the redirections. - res.set_status(StatusCode::Found); - res.headers_mut().set(header::Location(self.to_url.to_owned())); - Next::write() - } - fn on_response_writable(&mut self, _encoder: &mut Encoder) -> Next { - Next::end() + hyper::Response::new() + .with_status(StatusCode::Found) + .with_header(header::Location::new(self.to_url)) } } - - - diff --git a/dapps/src/handlers/streaming.rs b/dapps/src/handlers/streaming.rs index 5981cf221..73f7995b2 100644 --- a/dapps/src/handlers/streaming.rs +++ b/dapps/src/handlers/streaming.rs @@ -16,87 +16,43 @@ //! 
Content Stream Response -use std::io::{self, Read}; +use std::io; +use hyper::{self, header, mime, StatusCode}; -use hyper::{header, server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::mime::Mime; -use hyper::status::StatusCode; - -use handlers::add_security_headers; +use handlers::{add_security_headers, Reader}; use Embeddable; -const BUFFER_SIZE: usize = 1024; - -pub struct StreamingHandler { - buffer: [u8; BUFFER_SIZE], - buffer_leftover: usize, +pub struct StreamingHandler { + initial: Vec, + content: R, status: StatusCode, - content: io::BufReader, - mimetype: Mime, + mimetype: mime::Mime, safe_to_embed_on: Embeddable, } impl StreamingHandler { - pub fn new(content: R, status: StatusCode, mimetype: Mime, embeddable_on: Embeddable) -> Self { + pub fn new(content: R, status: StatusCode, mimetype: mime::Mime, safe_to_embed_on: Embeddable) -> Self { StreamingHandler { - buffer: [0; BUFFER_SIZE], - buffer_leftover: 0, - status: status, - content: io::BufReader::new(content), - mimetype: mimetype, - safe_to_embed_on: embeddable_on, + initial: Vec::new(), + content, + status, + mimetype, + safe_to_embed_on, } } pub fn set_initial_content(&mut self, content: &str) { - assert_eq!(self.buffer_leftover, 0); - let bytes = content.as_bytes(); - self.buffer_leftover = bytes.len(); - self.buffer[0..self.buffer_leftover].copy_from_slice(bytes); - } -} - -impl server::Handler for StreamingHandler { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(self.status); - res.headers_mut().set(header::ContentType(self.mimetype.clone())); - add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take()); - Next::write() - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - fn handle_error(e: io::Error) -> Next { - match 
e.kind() { - ::std::io::ErrorKind::WouldBlock => Next::write(), - _ => Next::end(), - } - } - - let write_pos = self.buffer_leftover; - match self.content.read(&mut self.buffer[write_pos..]) { - Err(e) => handle_error(e), - Ok(read) => match encoder.write(&self.buffer[..write_pos + read]) { - Err(e) => handle_error(e), - Ok(0) => Next::end(), - Ok(wrote) => { - self.buffer_leftover = write_pos + read - wrote; - if self.buffer_leftover > 0 { - for i in self.buffer_leftover..write_pos + read { - self.buffer.swap(i, i - self.buffer_leftover); - } - } - Next::write() - }, - }, - } + self.initial = content.as_bytes().to_vec(); + } + + pub fn into_response(self) -> (Reader, hyper::Response) { + let (reader, body) = Reader::pair(self.content, self.initial); + let mut res = hyper::Response::new() + .with_status(self.status) + .with_header(header::ContentType(self.mimetype)) + .with_body(body); + add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on); + + (reader, res) } } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index e579439d0..36b5bec4c 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(feature="nightly", plugin(clippy))] extern crate base32; -extern crate futures; +extern crate futures_cpupool; extern crate itertools; extern crate linked_hash_map; extern crate mime_guess; @@ -29,9 +29,7 @@ extern crate rand; extern crate rustc_hex; extern crate serde; extern crate serde_json; -extern crate time; extern crate unicase; -extern crate url as url_lib; extern crate zip; extern crate jsonrpc_core; @@ -44,14 +42,13 @@ extern crate fetch; extern crate node_health; extern crate parity_dapps_glue as parity_dapps; extern crate parity_hash_fetch as hash_fetch; -extern crate parity_reactor; extern crate parity_ui; extern crate hash; #[macro_use] -extern crate log; +extern crate futures; #[macro_use] -extern crate mime; +extern crate log; #[macro_use] extern crate serde_derive; @@ -59,6 +56,8 @@ extern crate serde_derive; extern 
crate ethcore_devtools as devtools; #[cfg(test)] extern crate env_logger; +#[cfg(test)] +extern crate parity_reactor; mod endpoint; mod apps; @@ -67,7 +66,6 @@ mod router; mod handlers; mod api; mod proxypac; -mod url; mod web; #[cfg(test)] mod tests; @@ -76,13 +74,12 @@ use std::collections::HashMap; use std::mem; use std::path::PathBuf; use std::sync::Arc; -use parking_lot::RwLock; - +use futures_cpupool::CpuPool; use jsonrpc_http_server::{self as http, hyper, Origin}; +use parking_lot::RwLock; use fetch::Fetch; use node_health::NodeHealth; -use parity_reactor::Remote; pub use hash_fetch::urlhint::ContractClient; pub use node_health::SyncStatus; @@ -105,6 +102,7 @@ pub struct Endpoints { endpoints: Arc>, dapps_path: PathBuf, embeddable: Option, + pool: Option, } impl Endpoints { @@ -117,7 +115,11 @@ impl Endpoints { /// Check for any changes in the local dapps folder and update. pub fn refresh_local_dapps(&self) { - let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone()); + let pool = match self.pool.as_ref() { + None => return, + Some(pool) => pool, + }; + let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone(), pool.clone()); let old_local = mem::replace(&mut *self.local_endpoints.write(), new_local.keys().cloned().collect()); let (_, to_remove): (_, Vec<_>) = old_local .into_iter() @@ -151,8 +153,8 @@ impl Middleware { /// Creates new middleware for UI server. 
pub fn ui( + pool: CpuPool, health: NodeHealth, - remote: Remote, dapps_domain: &str, registrar: Arc, sync_status: Arc, @@ -161,16 +163,16 @@ impl Middleware { let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status.clone(), - remote.clone(), fetch.clone(), + pool.clone(), ).embeddable_on(None).allow_dapps(false)); let special = { let mut special = special_endpoints( + pool.clone(), health, content_fetcher.clone(), - remote.clone(), ); - special.insert(router::SpecialEndpoint::Home, Some(apps::ui())); + special.insert(router::SpecialEndpoint::Home, Some(apps::ui(pool.clone()))); special }; let router = router::Router::new( @@ -189,8 +191,8 @@ impl Middleware { /// Creates new Dapps server middleware. pub fn dapps( + pool: CpuPool, health: NodeHealth, - remote: Remote, ui_address: Option<(String, u16)>, extra_embed_on: Vec<(String, u16)>, extra_script_src: Vec<(String, u16)>, @@ -206,8 +208,8 @@ impl Middleware { let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status.clone(), - remote.clone(), fetch.clone(), + pool.clone(), ).embeddable_on(embeddable.clone()).allow_dapps(true)); let (local_endpoints, endpoints) = apps::all_endpoints( dapps_path.clone(), @@ -215,21 +217,22 @@ impl Middleware { dapps_domain, embeddable.clone(), web_proxy_tokens, - remote.clone(), fetch.clone(), + pool.clone(), ); let endpoints = Endpoints { endpoints: Arc::new(RwLock::new(endpoints)), dapps_path, local_endpoints: Arc::new(RwLock::new(local_endpoints)), embeddable: embeddable.clone(), + pool: Some(pool.clone()), }; let special = { let mut special = special_endpoints( + pool.clone(), health, content_fetcher.clone(), - remote.clone(), ); special.insert( router::SpecialEndpoint::Home, @@ -254,23 +257,22 @@ impl Middleware { } impl http::RequestMiddleware for Middleware { - fn on_request(&self, req: &hyper::server::Request, control: 
&hyper::Control) -> http::RequestMiddlewareAction { - self.router.on_request(req, control) + fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction { + self.router.on_request(req) } } fn special_endpoints( + pool: CpuPool, health: NodeHealth, content_fetcher: Arc, - remote: Remote, ) -> HashMap>> { let mut special = HashMap::new(); special.insert(router::SpecialEndpoint::Rpc, None); - special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); + special.insert(router::SpecialEndpoint::Utils, Some(apps::utils(pool))); special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new( content_fetcher, health, - remote, ))); special } diff --git a/dapps/src/page/builtin.rs b/dapps/src/page/builtin.rs index c01c49d21..f1ea50d42 100644 --- a/dapps/src/page/builtin.rs +++ b/dapps/src/page/builtin.rs @@ -14,71 +14,62 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::io; +use futures::future; +use futures_cpupool::CpuPool; +use hyper::mime::{self, Mime}; +use itertools::Itertools; +use parity_dapps::{WebApp, Info}; + +use endpoint::{Endpoint, EndpointInfo, EndpointPath, Request, Response}; use page::{handler, PageCache}; -use std::sync::Arc; -use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; -use parity_dapps::{WebApp, File, Info}; use Embeddable; -pub struct PageEndpoint { +pub struct Dapp { + /// futures cpu pool + pool: CpuPool, /// Content of the files - pub app: Arc, - /// Prefix to strip from the path (when `None` deducted from `app_id`) - pub prefix: Option, + app: T, /// Safe to be loaded in frame by other origin. (use wisely!) safe_to_embed_on: Embeddable, info: EndpointInfo, fallback_to_index_html: bool, } -impl PageEndpoint { - /// Creates new `PageEndpoint` for builtin (compile time) Dapp. - pub fn new(app: T) -> Self { +impl Dapp { + /// Creates new `Dapp` for builtin (compile time) Dapp. 
+ pub fn new(pool: CpuPool, app: T) -> Self { let info = app.info(); - PageEndpoint { - app: Arc::new(app), - prefix: None, + Dapp { + pool, + app, safe_to_embed_on: None, info: EndpointInfo::from(info), fallback_to_index_html: false, } } - /// Creates a new `PageEndpoint` for builtin (compile time) Dapp. + /// Creates a new `Dapp` for builtin (compile time) Dapp. /// Instead of returning 404 this endpoint will always server index.html. - pub fn with_fallback_to_index(app: T) -> Self { + pub fn with_fallback_to_index(pool: CpuPool, app: T) -> Self { let info = app.info(); - PageEndpoint { - app: Arc::new(app), - prefix: None, + Dapp { + pool, + app, safe_to_embed_on: None, info: EndpointInfo::from(info), fallback_to_index_html: true, } } - /// Create new `PageEndpoint` and specify prefix that should be removed before looking for a file. - /// It's used only for special endpoints (i.e. `/parity-utils/`) - /// So `/parity-utils/inject.js` will be resolved to `/inject.js` is prefix is set. - pub fn with_prefix(app: T, prefix: String) -> Self { - let info = app.info(); - PageEndpoint { - app: Arc::new(app), - prefix: Some(prefix), - safe_to_embed_on: None, - info: EndpointInfo::from(info), - fallback_to_index_html: false, - } - } - - /// Creates new `PageEndpoint` which can be safely used in iframe + /// Creates new `Dapp` which can be safely used in iframe /// even from different origin. It might be dangerous (clickjacking). /// Use wisely! 
- pub fn new_safe_to_embed(app: T, address: Embeddable) -> Self { + pub fn new_safe_to_embed(pool: CpuPool, app: T, address: Embeddable) -> Self { let info = app.info(); - PageEndpoint { - app: Arc::new(app), - prefix: None, + Dapp { + pool, + app, safe_to_embed_on: address, info: EndpointInfo::from(info), fallback_to_index_html: false, @@ -86,21 +77,51 @@ impl PageEndpoint { } } -impl Endpoint for PageEndpoint { - +impl Endpoint for Dapp { fn info(&self) -> Option<&EndpointInfo> { Some(&self.info) } - fn to_handler(&self, path: EndpointPath) -> Box { - Box::new(handler::PageHandler { - app: BuiltinDapp::new(self.app.clone(), self.fallback_to_index_html), - prefix: self.prefix.clone(), - path: path, - file: handler::ServedFile::new(self.safe_to_embed_on.clone()), + fn respond(&self, path: EndpointPath, _req: Request) -> Response { + trace!(target: "dapps", "Builtin file path: {:?}", path); + let file_path = if path.has_no_params() { + "index.html".to_owned() + } else { + path.app_params.into_iter().filter(|x| !x.is_empty()).join("/") + }; + trace!(target: "dapps", "Builtin file: {:?}", file_path); + + let file = { + let file = |path| self.app.file(path).map(|file| { + let content_type = match file.content_type.parse() { + Ok(mime) => mime, + Err(_) => { + warn!(target: "dapps", "invalid MIME type: {}", file.content_type); + mime::TEXT_HTML + }, + }; + BuiltinFile { + content_type, + content: io::Cursor::new(file.content), + } + }); + let res = file(&file_path); + if self.fallback_to_index_html { + res.or_else(|| file("index.html")) + } else { + res + } + }; + + let (reader, response) = handler::PageHandler { + file, cache: PageCache::Disabled, safe_to_embed_on: self.safe_to_embed_on.clone(), - }) + }.into_response(); + + self.pool.spawn(reader).forget(); + + Box::new(future::ok(response)) } } @@ -116,66 +137,20 @@ impl From for EndpointInfo { } } -struct BuiltinDapp { - app: Arc, - fallback_to_index_html: bool, + +struct BuiltinFile { + content_type: Mime, + 
content: io::Cursor<&'static [u8]>, } -impl BuiltinDapp { - fn new(app: Arc, fallback_to_index_html: bool) -> Self { - BuiltinDapp { - app: app, - fallback_to_index_html: fallback_to_index_html, - } - } -} - -impl handler::Dapp for BuiltinDapp { - type DappFile = BuiltinDappFile; - - fn file(&self, path: &str) -> Option { - let file = |path| self.app.file(path).map(|_| { - BuiltinDappFile { - app: self.app.clone(), - path: path.into(), - write_pos: 0, - } - }); - let res = file(path); - if self.fallback_to_index_html { - res.or_else(|| file("index.html")) - } else { - res - } - } -} - -struct BuiltinDappFile { - app: Arc, - path: String, - write_pos: usize, -} - -impl BuiltinDappFile { - fn file(&self) -> &File { - self.app.file(&self.path).expect("Check is done when structure is created.") - } -} - -impl handler::DappFile for BuiltinDappFile { - fn content_type(&self) -> &str { - self.file().content_type - } - - fn is_drained(&self) -> bool { - self.write_pos == self.file().content.len() - } - - fn next_chunk(&mut self) -> &[u8] { - &self.file().content[self.write_pos..] - } - - fn bytes_written(&mut self, bytes: usize) { - self.write_pos += bytes; +impl handler::DappFile for BuiltinFile { + type Reader = io::Cursor<&'static [u8]>; + + fn content_type(&self) -> &Mime { + &self.content_type + } + + fn into_reader(self) -> Self::Reader { + self.content } } diff --git a/dapps/src/page/handler.rs b/dapps/src/page/handler.rs index ba38c64dc..56c9a4e1d 100644 --- a/dapps/src/page/handler.rs +++ b/dapps/src/page/handler.rs @@ -14,61 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use time::{self, Duration}; +use std::io; +use std::time::{Duration, SystemTime}; +use hyper::{self, header, StatusCode}; +use hyper::mime::Mime; -use hyper::header; -use hyper::server; -use hyper::uri::RequestUri; -use hyper::net::HttpStream; -use hyper::status::StatusCode; -use hyper::{Decoder, Encoder, Next}; -use endpoint::EndpointPath; -use handlers::{ContentHandler, add_security_headers}; +use handlers::{Reader, ContentHandler, add_security_headers}; use {Embeddable}; /// Represents a file that can be sent to client. /// Implementation should keep track of bytes already sent internally. -pub trait DappFile: Send { +pub trait DappFile { + /// A reader type returned by this file. + type Reader: io::Read; + /// Returns a content-type of this file. - fn content_type(&self) -> &str; + fn content_type(&self) -> &Mime; - /// Checks if all bytes from that file were written. - fn is_drained(&self) -> bool; - - /// Fetch next chunk to write to the client. - fn next_chunk(&mut self) -> &[u8]; - - /// How many files have been written to the client. - fn bytes_written(&mut self, bytes: usize); -} - -/// Dapp as a (dynamic) set of files. -pub trait Dapp: Send + 'static { - /// File type - type DappFile: DappFile; - - /// Returns file under given path. - fn file(&self, path: &str) -> Option; -} - -/// Currently served by `PageHandler` file -pub enum ServedFile { - /// File from dapp - File(T::DappFile), - /// Error (404) - Error(ContentHandler), -} - -impl ServedFile { - pub fn new(embeddable_on: Embeddable) -> Self { - ServedFile::Error(ContentHandler::error( - StatusCode::NotFound, - "404 Not Found", - "Requested dapp resource was not found.", - None, - embeddable_on, - )) - } + /// Convert this file into io::Read instance. + fn into_reader(self) -> Self::Reader where Self: Sized; } /// Defines what cache headers should be appended to returned resources. @@ -84,194 +48,55 @@ impl Default for PageCache { } } -/// A generic type for `PageHandler` allowing to set the URL. 
-/// Used by dapps fetching to set the URL after the content was downloaded. -pub trait PageHandlerWaiting: server::Handler + Send { - fn set_uri(&mut self, uri: &RequestUri); -} - /// A handler for a single webapp. /// Resolves correct paths and serves as a plumbing code between /// hyper server and dapp. -pub struct PageHandler { - /// A Dapp. - pub app: T, +pub struct PageHandler { /// File currently being served - pub file: ServedFile, - /// Optional prefix to strip from path. - pub prefix: Option, - /// Requested path. - pub path: EndpointPath, + pub file: Option, /// Flag indicating if the file can be safely embeded (put in iframe). pub safe_to_embed_on: Embeddable, /// Cache settings for this page. pub cache: PageCache, } -impl PageHandlerWaiting for PageHandler { - fn set_uri(&mut self, uri: &RequestUri) { - trace!(target: "dapps", "Setting URI: {:?}", uri); - self.file = match *uri { - RequestUri::AbsolutePath { ref path, .. } => { - self.app.file(&self.extract_path(path)) - }, - RequestUri::AbsoluteUri(ref url) => { - self.app.file(&self.extract_path(url.path())) - }, - _ => None, - }.map_or_else(|| ServedFile::new(self.safe_to_embed_on.clone()), |f| ServedFile::File(f)); - } -} +impl PageHandler { + pub fn into_response(self) -> (Option>, hyper::Response) { + let file = match self.file { + None => return (None, ContentHandler::error( + StatusCode::NotFound, + "File not found", + "Requested file has not been found.", + None, + self.safe_to_embed_on, + ).into()), + Some(file) => file, + }; -impl PageHandler { - fn extract_path(&self, path: &str) -> String { - let app_id = &self.path.app_id; - let prefix = "/".to_owned() + self.prefix.as_ref().unwrap_or(app_id); - let prefix_with_slash = prefix.clone() + "/"; - let query_pos = path.find('?').unwrap_or_else(|| path.len()); + let mut res = hyper::Response::new() + .with_status(StatusCode::Ok); - // Index file support - match path == "/" || path == &prefix || path == &prefix_with_slash { - true => 
"index.html".to_owned(), - false => if path.starts_with(&prefix_with_slash) { - path[prefix_with_slash.len()..query_pos].to_owned() - } else if path.starts_with("/") { - path[1..query_pos].to_owned() - } else { - path[0..query_pos].to_owned() + // headers + { + let mut headers = res.headers_mut(); + + if let PageCache::Enabled = self.cache { + let validity_secs = 365u32 * 24 * 3600; + let validity = Duration::from_secs(validity_secs as u64); + headers.set(header::CacheControl(vec![ + header::CacheDirective::Public, + header::CacheDirective::MaxAge(validity_secs), + ])); + headers.set(header::Expires(header::HttpDate::from(SystemTime::now() + validity))); } + + headers.set(header::ContentType(file.content_type().to_owned())); + + add_security_headers(&mut headers, self.safe_to_embed_on); } + + let (reader, body) = Reader::pair(file.into_reader(), Vec::new()); + res.set_body(body); + (Some(reader), res) } } - -impl server::Handler for PageHandler { - fn on_request(&mut self, req: server::Request) -> Next { - self.set_uri(req.uri()); - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - match self.file { - ServedFile::File(ref f) => { - res.set_status(StatusCode::Ok); - - if let PageCache::Enabled = self.cache { - let mut headers = res.headers_mut(); - let validity = Duration::days(365); - headers.set(header::CacheControl(vec![ - header::CacheDirective::Public, - header::CacheDirective::MaxAge(validity.num_seconds() as u32), - ])); - headers.set(header::Expires(header::HttpDate(time::now() + validity))); - } - - match f.content_type().parse() { - Ok(mime) => res.headers_mut().set(header::ContentType(mime)), - Err(()) => debug!(target: "dapps", "invalid MIME type: {}", f.content_type()), - } - - // Security headers: - add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take()); - Next::write() - }, - ServedFile::Error(ref mut 
handler) => { - handler.on_response(res) - } - } - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - match self.file { - ServedFile::Error(ref mut handler) => handler.on_response_writable(encoder), - ServedFile::File(ref f) if f.is_drained() => Next::end(), - ServedFile::File(ref mut f) => match encoder.write(f.next_chunk()) { - Ok(bytes) => { - f.bytes_written(bytes); - Next::write() - }, - Err(e) => match e.kind() { - ::std::io::ErrorKind::WouldBlock => Next::write(), - _ => Next::end(), - }, - } - } - } -} - - - -#[cfg(test)] -mod test { - use super::*; - - pub struct TestWebAppFile; - - impl DappFile for TestWebAppFile { - fn content_type(&self) -> &str { - unimplemented!() - } - - fn is_drained(&self) -> bool { - unimplemented!() - } - - fn next_chunk(&mut self) -> &[u8] { - unimplemented!() - } - - fn bytes_written(&mut self, _bytes: usize) { - unimplemented!() - } - } - - #[derive(Default)] - pub struct TestWebapp; - - impl Dapp for TestWebapp { - type DappFile = TestWebAppFile; - - fn file(&self, _path: &str) -> Option { - None - } - } -} - -#[test] -fn should_extract_path_with_appid() { - - // given - let path1 = "/"; - let path2= "/test.css"; - let path3 = "/app/myfile.txt"; - let path4 = "/app/myfile.txt?query=123"; - let page_handler = PageHandler { - app: test::TestWebapp, - prefix: None, - path: EndpointPath { - app_id: "app".to_owned(), - app_params: vec![], - host: "".to_owned(), - port: 8080, - using_dapps_domains: true, - }, - file: ServedFile::new(None), - cache: Default::default(), - safe_to_embed_on: None, - }; - - // when - let res1 = page_handler.extract_path(path1); - let res2 = page_handler.extract_path(path2); - let res3 = page_handler.extract_path(path3); - let res4 = page_handler.extract_path(path4); - - // then - assert_eq!(&res1, "index.html"); - assert_eq!(&res2, "test.css"); - assert_eq!(&res3, "myfile.txt"); - assert_eq!(&res4, "myfile.txt"); -} diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs 
index 8d52e86dd..82f1f28f3 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -15,16 +15,18 @@ // along with Parity. If not, see . use mime_guess; -use std::io::{Seek, Read, SeekFrom}; -use std::fs; +use std::{fs, fmt}; use std::path::{Path, PathBuf}; -use page::handler::{self, PageCache, PageHandlerWaiting}; -use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; -use mime::Mime; +use futures::{future}; +use futures_cpupool::CpuPool; +use page::handler::{self, PageCache}; +use endpoint::{Endpoint, EndpointInfo, EndpointPath, Request, Response}; +use hyper::mime::Mime; use Embeddable; -#[derive(Debug, Clone)] -pub struct LocalPageEndpoint { +#[derive(Clone)] +pub struct Dapp { + pool: CpuPool, path: PathBuf, mime: Option, info: Option, @@ -32,23 +34,37 @@ pub struct LocalPageEndpoint { embeddable_on: Embeddable, } -impl LocalPageEndpoint { - pub fn new(path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_on: Embeddable) -> Self { - LocalPageEndpoint { - path: path, +impl fmt::Debug for Dapp { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Dapp") + .field("path", &self.path) + .field("mime", &self.mime) + .field("info", &self.info) + .field("cache", &self.cache) + .field("embeddable_on", &self.embeddable_on) + .finish() + } +} + +impl Dapp { + pub fn new(pool: CpuPool, path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_on: Embeddable) -> Self { + Dapp { + pool, + path, mime: None, info: Some(info), - cache: cache, - embeddable_on: embeddable_on, + cache, + embeddable_on, } } - pub fn single_file(path: PathBuf, mime: Mime, cache: PageCache) -> Self { - LocalPageEndpoint { - path: path, + pub fn single_file(pool: CpuPool, path: PathBuf, mime: Mime, cache: PageCache) -> Self { + Dapp { + pool, + path, mime: Some(mime), info: None, - cache: cache, + cache, embeddable_on: None, } } @@ -57,125 +73,75 @@ impl LocalPageEndpoint { self.path.clone() } - fn page_handler_with_mime(&self, path: 
EndpointPath, mime: &Mime) -> handler::PageHandler { - handler::PageHandler { - app: LocalSingleFile { path: self.path.clone(), mime: format!("{}", mime) }, - prefix: None, - path: path, - file: handler::ServedFile::new(None), - safe_to_embed_on: self.embeddable_on.clone(), - cache: self.cache, - } - } - - fn page_handler(&self, path: EndpointPath) -> handler::PageHandler { - handler::PageHandler { - app: LocalDapp { path: self.path.clone() }, - prefix: None, - path: path, - file: handler::ServedFile::new(None), - safe_to_embed_on: self.embeddable_on.clone(), - cache: self.cache, - } - } - - pub fn to_page_handler(&self, path: EndpointPath) -> Box { + fn get_file(&self, path: &EndpointPath) -> Option { if let Some(ref mime) = self.mime { - Box::new(self.page_handler_with_mime(path, mime)) - } else { - Box::new(self.page_handler(path)) + return LocalFile::from_path(&self.path, mime.to_owned()); } + + let mut file_path = self.path.to_owned(); + + if path.has_no_params() { + file_path.push("index.html"); + } else { + for part in &path.app_params { + file_path.push(part); + } + } + + let mime = mime_guess::guess_mime_type(&file_path); + LocalFile::from_path(&file_path, mime) + } + + + pub fn to_response(&self, path: &EndpointPath) -> Response { + let (reader, response) = handler::PageHandler { + file: self.get_file(path), + cache: self.cache, + safe_to_embed_on: self.embeddable_on.clone(), + }.into_response(); + + self.pool.spawn(reader).forget(); + + Box::new(future::ok(response)) } } -impl Endpoint for LocalPageEndpoint { +impl Endpoint for Dapp { fn info(&self) -> Option<&EndpointInfo> { self.info.as_ref() } - fn to_handler(&self, path: EndpointPath) -> Box { - if let Some(ref mime) = self.mime { - Box::new(self.page_handler_with_mime(path, mime)) - } else { - Box::new(self.page_handler(path)) - } - } -} - -struct LocalSingleFile { - path: PathBuf, - mime: String, -} - -impl handler::Dapp for LocalSingleFile { - type DappFile = LocalFile; - - fn file(&self, _path: 
&str) -> Option { - LocalFile::from_path(&self.path, Some(&self.mime)) - } -} - -struct LocalDapp { - path: PathBuf, -} - -impl handler::Dapp for LocalDapp { - type DappFile = LocalFile; - - fn file(&self, file_path: &str) -> Option { - let mut path = self.path.clone(); - for part in file_path.split('/') { - path.push(part); - } - LocalFile::from_path(&path, None) + fn respond(&self, path: EndpointPath, _req: Request) -> Response { + self.to_response(&path) } } struct LocalFile { - content_type: String, - buffer: [u8; 4096], + content_type: Mime, file: fs::File, - len: u64, - pos: u64, } impl LocalFile { - fn from_path>(path: P, mime: Option<&str>) -> Option { + fn from_path>(path: P, content_type: Mime) -> Option { + trace!(target: "dapps", "Local file: {:?}", path.as_ref()); // Check if file exists fs::File::open(&path).ok().map(|file| { - let content_type = mime.map(|mime| mime.to_owned()) - .unwrap_or_else(|| mime_guess::guess_mime_type(path).to_string()); - let len = file.metadata().ok().map_or(0, |meta| meta.len()); LocalFile { - content_type: content_type, - buffer: [0; 4096], - file: file, - pos: 0, - len: len, + content_type, + file, } }) } } impl handler::DappFile for LocalFile { - fn content_type(&self) -> &str { + type Reader = fs::File; + + fn content_type(&self) -> &Mime { &self.content_type } - fn is_drained(&self) -> bool { - self.pos == self.len - } - - fn next_chunk(&mut self) -> &[u8] { - let _ = self.file.seek(SeekFrom::Start(self.pos)); - if let Ok(n) = self.file.read(&mut self.buffer) { - &self.buffer[0..n] - } else { - &self.buffer[0..0] - } - } - - fn bytes_written(&mut self, bytes: usize) { - self.pos += bytes as u64; + fn into_reader(self) -> Self::Reader { + self.file } } diff --git a/dapps/src/page/mod.rs b/dapps/src/page/mod.rs index 868cd00a3..420707bfe 100644 --- a/dapps/src/page/mod.rs +++ b/dapps/src/page/mod.rs @@ -15,11 +15,9 @@ // along with Parity. If not, see . 
-mod builtin; -mod local; +pub mod builtin; +pub mod local; mod handler; -pub use self::local::LocalPageEndpoint; -pub use self::builtin::PageEndpoint; -pub use self::handler::{PageCache, PageHandlerWaiting}; +pub use self::handler::PageCache; diff --git a/dapps/src/proxypac.rs b/dapps/src/proxypac.rs index 5bffe649c..85ac11423 100644 --- a/dapps/src/proxypac.rs +++ b/dapps/src/proxypac.rs @@ -16,9 +16,11 @@ //! Serving ProxyPac file -use endpoint::{Endpoint, Handler, EndpointPath}; -use handlers::ContentHandler; use apps::HOME_PAGE; +use endpoint::{Endpoint, Request, Response, EndpointPath}; +use futures::future; +use handlers::ContentHandler; +use hyper::mime; use {address, Embeddable}; pub struct ProxyPac { @@ -33,7 +35,7 @@ impl ProxyPac { } impl Endpoint for ProxyPac { - fn to_handler(&self, path: EndpointPath) -> Box { + fn respond(&self, path: EndpointPath, _req: Request) -> Response { let ui = self.embeddable .as_ref() .map(|ref parent| address(&parent.host, parent.port)) @@ -57,7 +59,9 @@ function FindProxyForURL(url, host) {{ "#, HOME_PAGE, self.dapps_domain, path.host, path.port, ui); - Box::new(ContentHandler::ok(content, mime!(Application/Javascript))) + Box::new(future::ok( + ContentHandler::ok(content, mime::TEXT_JAVASCRIPT).into() + )) } } diff --git a/dapps/src/router.rs b/dapps/src/router.rs index 2b74d51df..64617b3fa 100644 --- a/dapps/src/router.rs +++ b/dapps/src/router.rs @@ -17,18 +17,16 @@ //! Router implementation //! Dispatch requests to proper application. 
-use std::cmp; use std::sync::Arc; use std::collections::HashMap; -use url::{Url, Host}; -use hyper::{self, server, header, Control}; -use hyper::net::HttpStream; +use futures::future; +use hyper::{self, header, Uri}; use jsonrpc_http_server as http; use apps; use apps::fetcher::Fetcher; -use endpoint::{Endpoint, EndpointPath, Handler}; +use endpoint::{self, Endpoint, EndpointPath}; use Endpoints; use handlers; use Embeddable; @@ -43,6 +41,13 @@ pub enum SpecialEndpoint { None, } +enum Response { + Some(endpoint::Response), + None(hyper::Request), +} + +/// An endpoint router. +/// Dispatches the request to particular Endpoint by requested uri/path. pub struct Router { endpoints: Option, fetch: Arc, @@ -52,11 +57,10 @@ pub struct Router { } impl Router { - fn resolve_request(&self, req: &server::Request, control: Control, refresh_dapps: bool) -> (bool, Option>) { + fn resolve_request(&self, req: hyper::Request, refresh_dapps: bool) -> (bool, Response) { // Choose proper handler depending on path / domain - let url = handlers::extract_url(req); - let endpoint = extract_endpoint(&url, &self.dapps_domain); - let referer = extract_referer_endpoint(req, &self.dapps_domain); + let endpoint = extract_endpoint(req.uri(), req.headers().get(), &self.dapps_domain); + let referer = extract_referer_endpoint(&req, &self.dapps_domain); let is_utils = endpoint.1 == SpecialEndpoint::Utils; let is_get_request = *req.method() == hyper::Method::Get; let is_head_request = *req.method() == hyper::Method::Head; @@ -64,47 +68,51 @@ impl Router { .as_ref() .map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp)); - trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); - debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); + trace!(target: "dapps", "Routing request to {:?}. 
Details: {:?}", req.uri(), req); + debug!(target: "dapps", "Handling endpoint request: {:?}, referer: {:?}", endpoint, referer); (is_utils, match (endpoint.0, endpoint.1, referer) { // Handle invalid web requests that we can recover from - (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) + (ref path, SpecialEndpoint::None, Some(ref referer)) if referer.app_id == apps::WEB_PATH && has_dapp(apps::WEB_PATH) && !is_web_endpoint(path) => { - trace!(target: "dapps", "Redirecting to correct web request: {:?}", referer_url); - let len = cmp::min(referer_url.path.len(), 2); // /web// - let base = referer_url.path[..len].join("/"); - let requested = url.map(|u| u.path.join("/")).unwrap_or_default(); - Some(handlers::Redirection::boxed(&format!("/{}/{}", base, requested))) + let token = referer.app_params.get(0).map(String::as_str).unwrap_or(""); + let requested = req.uri().path(); + let query = req.uri().query().map_or_else(String::new, |query| format!("?{}", query)); + let redirect_url = format!("/{}/{}{}{}", apps::WEB_PATH, token, requested, query); + trace!(target: "dapps", "Redirecting to correct web request: {:?}", redirect_url); + Response::Some(Box::new(future::ok( + handlers::Redirection::new(redirect_url).into() + ))) }, // First check special endpoints (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { trace!(target: "dapps", "Resolving to special endpoint."); - self.special.get(endpoint) - .expect("special known to contain key; qed") - .as_ref() - .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) + let special = self.special.get(endpoint).expect("special known to contain key; qed"); + match *special { + Some(ref special) => Response::Some(special.respond(path.clone().unwrap_or_default(), req)), + None => Response::None(req), + } }, // Then delegate to dapp (Some(ref path), _, _) if has_dapp(&path.app_id) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); - 
Some(self.endpoints + Response::Some(self.endpoints .as_ref() .expect("endpoints known to be set; qed") .endpoints .read() .get(&path.app_id) .expect("endpoints known to contain key; qed") - .to_async_handler(path.clone(), control)) + .respond(path.clone(), req)) }, // Try to resolve and fetch the dapp (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { trace!(target: "dapps", "Resolving to fetchable content."); - Some(self.fetch.to_async_handler(path.clone(), control)) + Response::Some(self.fetch.respond(path.clone(), req)) }, // 404 for non-existent content (only if serving endpoints and not homepage) (Some(ref path), _, _) @@ -117,45 +125,50 @@ impl Router { if refresh_dapps { debug!(target: "dapps", "Refreshing dapps and re-trying."); self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps()); - return self.resolve_request(req, control, false) + return self.resolve_request(req, false); } else { - Some(Box::new(handlers::ContentHandler::error( + Response::Some(Box::new(future::ok(handlers::ContentHandler::error( hyper::StatusCode::NotFound, "404 Not Found", "Requested content was not found.", None, self.embeddable_on.clone(), - ))) + ).into()))) } }, // Any other GET|HEAD requests to home page. 
_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => { - self.special.get(&SpecialEndpoint::Home) - .expect("special known to contain key; qed") - .as_ref() - .map(|special| special.to_async_handler(Default::default(), control)) + let special = self.special.get(&SpecialEndpoint::Home).expect("special known to contain key; qed"); + match *special { + Some(ref special) => { + let mut endpoint = EndpointPath::default(); + endpoint.app_params = req.uri().path().split('/').map(str::to_owned).collect(); + Response::Some(special.respond(endpoint, req)) + }, + None => Response::None(req), + } }, // RPC by default _ => { trace!(target: "dapps", "Resolving to RPC call."); - None + Response::None(req) } }) } } impl http::RequestMiddleware for Router { - fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { - let control = control.clone(); + fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction { let is_origin_set = req.headers().get::().is_some(); - let (is_utils, handler) = self.resolve_request(req, control, self.endpoints.is_some()); - match handler { - Some(handler) => http::RequestMiddlewareAction::Respond { + let (is_utils, response) = self.resolve_request(req, self.endpoints.is_some()); + match response { + Response::Some(response) => http::RequestMiddlewareAction::Respond { should_validate_hosts: !is_utils, - handler: handler, + response, }, - None => http::RequestMiddlewareAction::Proceed { + Response::None(request) => http::RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors: !is_origin_set, + request, }, } } @@ -186,41 +199,44 @@ fn is_web_endpoint(path: &Option) -> bool { } } -fn extract_referer_endpoint(req: &server::Request, dapps_domain: &str) -> Option<(EndpointPath, Url)> { +fn extract_referer_endpoint(req: &hyper::Request, dapps_domain: &str) -> Option { let referer = req.headers().get::(); - let url = referer.and_then(|referer| 
Url::parse(&referer.0).ok()); + let url = referer.and_then(|referer| referer.parse().ok()); url.and_then(|url| { - let option = Some(url); - extract_url_referer_endpoint(&option, dapps_domain).or_else(|| { - extract_endpoint(&option, dapps_domain).0.map(|endpoint| (endpoint, option.expect("Just wrapped; qed"))) + extract_url_referer_endpoint(&url, dapps_domain).or_else(|| { + extract_endpoint(&url, None, dapps_domain).0 }) }) } -fn extract_url_referer_endpoint(url: &Option, dapps_domain: &str) -> Option<(EndpointPath, Url)> { - let query = url.as_ref().and_then(|url| url.query.as_ref()); - match (url, query) { - (&Some(ref url), Some(ref query)) if query.starts_with(apps::URL_REFERER) => { - let referer_url = format!("http://{}:{}/{}", url.host, url.port, &query[apps::URL_REFERER.len()..]); +fn extract_url_referer_endpoint(url: &Uri, dapps_domain: &str) -> Option { + let query = url.query(); + match query { + Some(query) if query.starts_with(apps::URL_REFERER) => { + let scheme = url.scheme().unwrap_or("http"); + let host = url.host().unwrap_or("unknown"); + let port = default_port(url, None); + let referer_url = format!("{}://{}:{}/{}", scheme, host, port, &query[apps::URL_REFERER.len()..]); debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url); - let referer_url = Url::parse(&referer_url).ok(); - extract_endpoint(&referer_url, dapps_domain).0.map(|endpoint| { - (endpoint, referer_url.expect("Endpoint returned only when url `is_some`").clone()) - }) + if let Some(referer_url) = referer_url.parse().ok() { + extract_endpoint(&referer_url, None, dapps_domain).0 + } else { + None + } }, _ => None, } } -fn extract_endpoint(url: &Option, dapps_domain: &str) -> (Option, SpecialEndpoint) { - fn special_endpoint(url: &Url) -> SpecialEndpoint { - if url.path.len() <= 1 { +fn extract_endpoint(url: &Uri, extra_host: Option<&header::Host>, dapps_domain: &str) -> (Option, SpecialEndpoint) { + fn special_endpoint(path: &[&str]) -> SpecialEndpoint { 
+ if path.len() <= 1 { return SpecialEndpoint::None; } - match url.path[0].as_ref() { + match path[0].as_ref() { apps::RPC_PATH => SpecialEndpoint::Rpc, apps::API_PATH => SpecialEndpoint::Api, apps::UTILS_PATH => SpecialEndpoint::Utils, @@ -229,114 +245,162 @@ fn extract_endpoint(url: &Option, dapps_domain: &str) -> (Option match url.host { - Host::Domain(ref domain) if domain.ends_with(dapps_domain) => { - let id = &domain[0..(domain.len() - dapps_domain.len())]; - let (id, params) = if let Some(split) = id.rfind('.') { - let (params, id) = id.split_at(split); - (id[1..].to_owned(), [params.to_owned()].into_iter().chain(&url.path).cloned().collect()) - } else { - (id.to_owned(), url.path.clone()) - }; + let port = default_port(url, extra_host.as_ref().and_then(|h| h.port())); + let host = url.host().or_else(|| extra_host.as_ref().map(|h| h.hostname())); + let query = url.query().map(str::to_owned); + let mut path_segments = url.path().split('/').skip(1).collect::>(); + trace!( + target: "dapps", + "Extracting endpoint from: {:?} (dapps: {}). 
Got host {:?}:{} with path {:?}", + url, dapps_domain, host, port, path_segments + ); + match host { + Some(host) if host.ends_with(dapps_domain) => { + let id = &host[0..(host.len() - dapps_domain.len())]; + let special = special_endpoint(&path_segments); - (Some(EndpointPath { - app_id: id, - app_params: params, - host: domain.clone(), - port: url.port, - using_dapps_domains: true, - }), special_endpoint(url)) - }, - _ if url.path.len() > 1 => { - let id = url.path[0].to_owned(); - (Some(EndpointPath { - app_id: id, - app_params: url.path[1..].to_vec(), - host: format!("{}", url.host), - port: url.port, - using_dapps_domains: false, - }), special_endpoint(url)) - }, - _ => (None, special_endpoint(url)), + // remove special endpoint id from params + if special != SpecialEndpoint::None { + path_segments.remove(0); + } + + let (app_id, app_params) = if let Some(split) = id.rfind('.') { + let (params, id) = id.split_at(split); + path_segments.insert(0, params); + (id[1..].to_owned(), path_segments) + } else { + (id.to_owned(), path_segments) + }; + + (Some(EndpointPath { + app_id, + app_params: app_params.into_iter().map(Into::into).collect(), + query, + host: host.to_owned(), + port, + using_dapps_domains: true, + }), special) }, - _ => (None, SpecialEndpoint::None) + Some(host) if path_segments.len() > 1 => { + let special = special_endpoint(&path_segments); + let id = path_segments.remove(0); + (Some(EndpointPath { + app_id: id.to_owned(), + app_params: path_segments.into_iter().map(Into::into).collect(), + query, + host: host.to_owned(), + port, + using_dapps_domains: false, + }), special) + }, + _ => (None, special_endpoint(&path_segments)), } } -#[test] -fn should_extract_endpoint() { - let dapps_domain = ".web3.site"; - assert_eq!(extract_endpoint(&None, dapps_domain), (None, SpecialEndpoint::None)); - - // With path prefix - assert_eq!( - extract_endpoint(&Url::parse("http://localhost:8080/status/index.html").ok(), dapps_domain), - (Some(EndpointPath { - 
app_id: "status".to_owned(), - app_params: vec!["index.html".to_owned()], - host: "localhost".to_owned(), - port: 8080, - using_dapps_domains: false, - }), SpecialEndpoint::None) - ); - - // With path prefix - assert_eq!( - extract_endpoint(&Url::parse("http://localhost:8080/rpc/").ok(), dapps_domain), - (Some(EndpointPath { - app_id: "rpc".to_owned(), - app_params: vec!["".to_owned()], - host: "localhost".to_owned(), - port: 8080, - using_dapps_domains: false, - }), SpecialEndpoint::Rpc) - ); - - assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/parity-utils/inject.js").ok(), dapps_domain), - (Some(EndpointPath { - app_id: "status".to_owned(), - app_params: vec!["my".to_owned(), "parity-utils".into(), "inject.js".into()], - host: "my.status.web3.site".to_owned(), - port: 80, - using_dapps_domains: true, - }), SpecialEndpoint::Utils) - ); - - // By Subdomain - assert_eq!( - extract_endpoint(&Url::parse("http://status.web3.site/test.html").ok(), dapps_domain), - (Some(EndpointPath { - app_id: "status".to_owned(), - app_params: vec!["test.html".to_owned()], - host: "status.web3.site".to_owned(), - port: 80, - using_dapps_domains: true, - }), SpecialEndpoint::None) - ); - - // RPC by subdomain - assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/rpc/").ok(), dapps_domain), - (Some(EndpointPath { - app_id: "status".to_owned(), - app_params: vec!["my".to_owned(), "rpc".into(), "".into()], - host: "my.status.web3.site".to_owned(), - port: 80, - using_dapps_domains: true, - }), SpecialEndpoint::Rpc) - ); - - // API by subdomain - assert_eq!( - extract_endpoint(&Url::parse("http://my.status.web3.site/api/").ok(), dapps_domain), - (Some(EndpointPath { - app_id: "status".to_owned(), - app_params: vec!["my".to_owned(), "api".into(), "".into()], - host: "my.status.web3.site".to_owned(), - port: 80, - using_dapps_domains: true, - }), SpecialEndpoint::Api) - ); +fn default_port(url: &Uri, extra_port: Option) -> u16 { + let scheme = 
url.scheme().unwrap_or("http"); + url.port().or(extra_port).unwrap_or_else(|| match scheme { + "http" => 80, + "https" => 443, + _ => 80, + }) +} + +#[cfg(test)] +mod tests { + use super::{SpecialEndpoint, EndpointPath, extract_endpoint}; + + #[test] + fn should_extract_endpoint() { + let dapps_domain = ".web3.site"; + + // With path prefix + assert_eq!( + extract_endpoint(&"http://localhost:8080/status/index.html?q=1".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["index.html".to_owned()], + query: Some("q=1".into()), + host: "localhost".to_owned(), + port: 8080, + using_dapps_domains: false, + }), SpecialEndpoint::None) + ); + + // With path prefix + assert_eq!( + extract_endpoint(&"http://localhost:8080/rpc/".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "rpc".to_owned(), + app_params: vec!["".to_owned()], + query: None, + host: "localhost".to_owned(), + port: 8080, + using_dapps_domains: false, + }), SpecialEndpoint::Rpc) + ); + + assert_eq!( + extract_endpoint(&"http://my.status.web3.site/parity-utils/inject.js".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["my".into(), "inject.js".into()], + query: None, + host: "my.status.web3.site".to_owned(), + port: 80, + using_dapps_domains: true, + }), SpecialEndpoint::Utils) + ); + + assert_eq!( + extract_endpoint(&"http://my.status.web3.site/inject.js".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["my".into(), "inject.js".into()], + query: None, + host: "my.status.web3.site".to_owned(), + port: 80, + using_dapps_domains: true, + }), SpecialEndpoint::None) + ); + + // By Subdomain + assert_eq!( + extract_endpoint(&"http://status.web3.site/test.html".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["test.html".to_owned()], + query: None, + 
host: "status.web3.site".to_owned(), + port: 80, + using_dapps_domains: true, + }), SpecialEndpoint::None) + ); + + // RPC by subdomain + assert_eq!( + extract_endpoint(&"http://my.status.web3.site/rpc/".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["my".into(), "".into()], + query: None, + host: "my.status.web3.site".to_owned(), + port: 80, + using_dapps_domains: true, + }), SpecialEndpoint::Rpc) + ); + + // API by subdomain + assert_eq!( + extract_endpoint(&"http://my.status.web3.site/api/".parse().unwrap(), None, dapps_domain), + (Some(EndpointPath { + app_id: "status".to_owned(), + app_params: vec!["my".into(), "".into()], + query: None, + host: "my.status.web3.site".to_owned(), + port: 80, + using_dapps_domains: true, + }), SpecialEndpoint::Api) + ); + } } diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index b75cd25f2..3ae3f7cbb 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -49,6 +49,7 @@ fn should_handle_ping() { "\ POST /api/ping HTTP/1.1\r\n\ Host: home.parity\r\n\ + Content-Type: application/json\r\n\ Connection: close\r\n\ \r\n\ {} diff --git a/dapps/src/tests/fetch.rs b/dapps/src/tests/fetch.rs index f12323155..9f181d364 100644 --- a/dapps/src/tests/fetch.rs +++ b/dapps/src/tests/fetch.rs @@ -18,7 +18,7 @@ use devtools::http_client; use rustc_hex::FromHex; use tests::helpers::{ serve_with_registrar, serve_with_registrar_and_sync, serve_with_fetch, - serve_with_registrar_and_fetch, serve_with_registrar_and_fetch_and_threads, + serve_with_registrar_and_fetch, request, assert_security_headers_for_embed, }; @@ -171,6 +171,8 @@ fn should_return_fetched_dapp_content() { r#"18

Hello Gavcoin!

+0 + "# ); @@ -257,7 +259,7 @@ fn should_not_request_content_twice() { use std::thread; // given - let (server, fetch, registrar) = serve_with_registrar_and_fetch_and_threads(true); + let (server, fetch, registrar) = serve_with_registrar_and_fetch(); let gavcoin = GAVCOIN_ICON.from_hex().unwrap(); registrar.set_result( "2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".parse().unwrap(), diff --git a/dapps/src/tests/helpers/fetch.rs b/dapps/src/tests/helpers/fetch.rs index 853d6857e..21d5bf2a7 100644 --- a/dapps/src/tests/helpers/fetch.rs +++ b/dapps/src/tests/helpers/fetch.rs @@ -94,7 +94,7 @@ impl FakeFetch { } impl Fetch for FakeFetch { - type Result = futures::BoxFuture; + type Result = Box + Send>; fn new() -> Result where Self: Sized { Ok(FakeFetch::default()) @@ -117,6 +117,17 @@ impl Fetch for FakeFetch { tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); - rx.map_err(|_| fetch::Error::Aborted).boxed() + Box::new(rx.map_err(|_| fetch::Error::Aborted)) + } + + fn process_and_forget(&self, f: F) where + F: Future + Send + 'static, + I: Send + 'static, + E: Send + 'static, + { + // Spawn the task in a separate thread. 
+ thread::spawn(|| { + let _ = f.wait(); + }); } } diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index 6f4652351..4ee21c8f4 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -22,12 +22,12 @@ use std::sync::Arc; use env_logger::LogBuilder; use jsonrpc_core::IoHandler; use jsonrpc_http_server::{self as http, Host, DomainsValidation}; +use parity_reactor::Remote; use devtools::http_client; use hash_fetch::urlhint::ContractClient; use fetch::{Fetch, Client as FetchClient}; use node_health::{NodeHealth, TimeChecker, CpuPool}; -use parity_reactor::Remote; use {Middleware, SyncStatus, WebProxyTokens}; @@ -55,7 +55,7 @@ fn init_logger() { } } -pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (Server, Arc) where +pub fn init_server(process: F, io: IoHandler) -> (Server, Arc) where F: FnOnce(ServerBuilder) -> ServerBuilder, B: Fetch, { @@ -64,11 +64,9 @@ pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (Server, let mut dapps_path = env::temp_dir(); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - let server = process(ServerBuilder::new( - &dapps_path, registrar.clone(), remote, - )) - .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); + let mut builder = ServerBuilder::new(&dapps_path, registrar.clone()); + builder.signer_address = Some(("127.0.0.1".into(), SIGNER_PORT)); + let server = process(builder).start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); ( server, registrar, @@ -76,34 +74,34 @@ pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (Server, } pub fn serve_with_rpc(io: IoHandler) -> Server { - init_server(|builder| builder, io, Remote::new_sync()).0 + init_server(|builder| builder, io).0 } pub fn serve_hosts(hosts: Option>) -> Server { let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect()); - init_server(|builder| 
builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 + init_server(|mut builder| { + builder.allowed_hosts = hosts.into(); + builder + }, Default::default()).0 } pub fn serve_with_registrar() -> (Server, Arc) { - init_server(|builder| builder, Default::default(), Remote::new_sync()) + init_server(|builder| builder, Default::default()) } pub fn serve_with_registrar_and_sync() -> (Server, Arc) { - init_server(|builder| { - builder.sync_status(Arc::new(FakeSync(true))) - }, Default::default(), Remote::new_sync()) + init_server(|mut builder| { + builder.sync_status = Arc::new(FakeSync(true)); + builder + }, Default::default()) } pub fn serve_with_registrar_and_fetch() -> (Server, FakeFetch, Arc) { - serve_with_registrar_and_fetch_and_threads(false) -} - -pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Server, FakeFetch, Arc) { let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { builder.fetch(f.clone()) - }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); + }, Default::default()); (server, fetch, reg) } @@ -111,19 +109,25 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv pub fn serve_with_fetch(web_token: &'static str, domain: &'static str) -> (Server, FakeFetch) { let fetch = FakeFetch::default(); let f = fetch.clone(); - let (server, _) = init_server(move |builder| { - builder - .fetch(f.clone()) - .web_proxy_tokens(Arc::new(move |token| { - if &token == web_token { Some(domain.into()) } else { None } - })) - }, Default::default(), Remote::new_sync()); + let (server, _) = init_server(move |mut builder| { + builder.web_proxy_tokens = Arc::new(move |token| { + if &token == web_token { Some(domain.into()) } else { None } + }); + builder.fetch(f.clone()) + }, Default::default()); (server, fetch) } pub fn serve() -> Server { - init_server(|builder| builder, Default::default(), 
Remote::new_sync()).0 + init_server(|builder| builder, Default::default()).0 +} + +pub fn serve_ui() -> Server { + init_server(|mut builder| { + builder.serve_ui = true; + builder + }, Default::default()).0 } pub fn request(server: Server, request: &str) -> http_client::Response { @@ -146,13 +150,13 @@ pub struct ServerBuilder { web_proxy_tokens: Arc, signer_address: Option<(String, u16)>, allowed_hosts: DomainsValidation, - remote: Remote, fetch: Option, + serve_ui: bool, } impl ServerBuilder { /// Construct new dapps server - pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { + pub fn new>(dapps_path: P, registrar: Arc) -> Self { ServerBuilder { dapps_path: dapps_path.as_ref().to_owned(), registrar: registrar, @@ -160,8 +164,8 @@ impl ServerBuilder { web_proxy_tokens: Arc::new(|_| None), signer_address: None, allowed_hosts: DomainsValidation::Disabled, - remote: remote, fetch: None, + serve_ui: false, } } } @@ -176,37 +180,11 @@ impl ServerBuilder { web_proxy_tokens: self.web_proxy_tokens, signer_address: self.signer_address, allowed_hosts: self.allowed_hosts, - remote: self.remote, fetch: Some(fetch), + serve_ui: self.serve_ui, } } - /// Change default sync status. - pub fn sync_status(mut self, status: Arc) -> Self { - self.sync_status = status; - self - } - - /// Change default web proxy tokens validator. - pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { - self.web_proxy_tokens = tokens; - self - } - - /// Change default signer port. - pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { - self.signer_address = signer_address; - self - } - - /// Change allowed hosts. 
- /// `None` - All hosts are allowed - /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { - self.allowed_hosts = allowed_hosts; - self - } - /// Asynchronously start server with no authentication, /// returns result with `Server` handle on success or an error. pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result { @@ -221,8 +199,9 @@ impl ServerBuilder { self.registrar, self.sync_status, self.web_proxy_tokens, - self.remote, + Remote::new_sync(), fetch, + self.serve_ui, ) } @@ -254,26 +233,39 @@ impl Server { web_proxy_tokens: Arc, remote: Remote, fetch: F, + serve_ui: bool, ) -> Result { let health = NodeHealth::new( sync_status.clone(), TimeChecker::new::(&[], CpuPool::new(1)), remote.clone(), ); - let middleware = Middleware::dapps( - health, - remote, - signer_address, - vec![], - vec![], - dapps_path, - extra_dapps, - DAPPS_DOMAIN.into(), - registrar, - sync_status, - web_proxy_tokens, - fetch, - ); + let pool = ::futures_cpupool::CpuPool::new(1); + let middleware = if serve_ui { + Middleware::ui( + pool, + health, + DAPPS_DOMAIN.into(), + registrar, + sync_status, + fetch, + ) + } else { + Middleware::dapps( + pool, + health, + signer_address, + vec![], + vec![], + dapps_path, + extra_dapps, + DAPPS_DOMAIN.into(), + registrar, + sync_status, + web_proxy_tokens, + fetch, + ) + }; let mut allowed_hosts: Option> = allowed_hosts.into(); allowed_hosts.as_mut().map(|mut hosts| { @@ -295,9 +287,7 @@ impl Server { pub fn addr(&self) -> &SocketAddr { self.server.as_ref() .expect("server is always Some at the start; it's consumed only when object is dropped; qed") - .addrs() - .first() - .expect("You cannot start the server without binding to at least one address; qed") + .address() } } diff --git a/dapps/src/tests/helpers/registrar.rs b/dapps/src/tests/helpers/registrar.rs index 4df7e31b6..2873c7ae7 100644 --- 
a/dapps/src/tests/helpers/registrar.rs +++ b/dapps/src/tests/helpers/registrar.rs @@ -17,13 +17,13 @@ use std::str; use std::sync::Arc; use std::collections::HashMap; -use rustc_hex::FromHex; -use hash_fetch::urlhint::ContractClient; use bigint::hash::H256; -use util::Address; use bytes::{Bytes, ToPretty}; +use hash_fetch::urlhint::{ContractClient, BoxFuture}; use parking_lot::Mutex; +use rustc_hex::FromHex; +use util::Address; const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; @@ -67,7 +67,7 @@ impl ContractClient for FakeRegistrar { Ok(REGISTRAR.parse().unwrap()) } - fn call(&self, address: Address, data: Bytes) -> ::futures::BoxFuture { + fn call(&self, address: Address, data: Bytes) -> BoxFuture { let call = (address.to_hex(), data.to_hex()); self.calls.lock().push(call.clone()); let res = self.responses.lock().get(&call).cloned().expect(&format!("No response for call: {:?}", call)); diff --git a/dapps/src/tests/home.rs b/dapps/src/tests/home.rs new file mode 100644 index 000000000..fa5c5b4c4 --- /dev/null +++ b/dapps/src/tests/home.rs @@ -0,0 +1,62 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use tests::helpers::{serve_ui, request, assert_security_headers}; + +#[test] +fn should_serve_home_js() { + // given + let server = serve_ui(); + + // when + let response = request(server, + "\ + GET /inject.js HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", "application/javascript"); + assert_eq!(response.body.contains("function(){"), true, "Expected function in: {}", response.body); + assert_security_headers(&response.headers); +} + +#[test] +fn should_serve_home() { + // given + let server = serve_ui(); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", "text/html"); + assert_security_headers(&response.headers); +} diff --git a/dapps/src/tests/mod.rs b/dapps/src/tests/mod.rs index 089318483..a47294392 100644 --- a/dapps/src/tests/mod.rs +++ b/dapps/src/tests/mod.rs @@ -20,6 +20,7 @@ mod helpers; mod api; mod fetch; +mod home; mod redirection; mod rpc; mod validation; diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 81d3ec76c..b7f72009f 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -201,6 +201,7 @@ fn should_serve_utils() { // then response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body.contains("function(){"), true); + response.assert_header("Content-Type", "application/javascript"); + assert_eq!(response.body.contains("function(){"), true, "Expected function in: {}", response.body); assert_security_headers(&response.headers); } diff --git a/dapps/src/tests/validation.rs b/dapps/src/tests/validation.rs index fb68cf5ed..bd97c940a 100644 --- a/dapps/src/tests/validation.rs +++ b/dapps/src/tests/validation.rs @@ -33,7 +33,7 @@ fn 
should_reject_invalid_host() { ); // then - assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); + response.assert_status("HTTP/1.1 403 Forbidden"); assert!(response.body.contains("Provided Host header is not whitelisted."), response.body); } @@ -54,7 +54,7 @@ fn should_allow_valid_host() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); } #[test] @@ -74,7 +74,7 @@ fn should_serve_dapps_domains() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); } #[test] @@ -95,5 +95,5 @@ fn should_allow_parity_utils_even_on_invalid_domain() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); } diff --git a/dapps/src/url.rs b/dapps/src/url.rs deleted file mode 100644 index 23dbfb4fc..000000000 --- a/dapps/src/url.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! HTTP/HTTPS URL type. Based on URL type from Iron library. - -use url_lib::{self}; -pub use url_lib::Host; - -/// HTTP/HTTPS URL type for Iron. -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct Url { - /// Raw url of url - pub raw: url_lib::Url, - - /// The host field of the URL, probably a domain. 
- pub host: Host, - - /// The connection port. - pub port: u16, - - /// The URL path, the resource to be accessed. - /// - /// A *non-empty* vector encoding the parts of the URL path. - /// Empty entries of `""` correspond to trailing slashes. - pub path: Vec, - - /// The URL query. - pub query: Option, - - /// The URL username field, from the userinfo section of the URL. - /// - /// `None` if the `@` character was not part of the input OR - /// if a blank username was provided. - /// Otherwise, a non-empty string. - pub username: Option, - - /// The URL password field, from the userinfo section of the URL. - /// - /// `None` if the `@` character was not part of the input OR - /// if a blank password was provided. - /// Otherwise, a non-empty string. - pub password: Option, -} - -impl Url { - /// Create a URL from a string. - /// - /// The input must be a valid URL with a special scheme for this to succeed. - /// - /// HTTP and HTTPS are special schemes. - /// - /// See: http://url.spec.whatwg.org/#special-scheme - pub fn parse(input: &str) -> Result { - // Parse the string using rust-url, then convert. - match url_lib::Url::parse(input) { - Ok(raw_url) => Url::from_generic_url(raw_url), - Err(e) => Err(format!("{}", e)) - } - } - - /// Create a `Url` from a `rust-url` `Url`. - pub fn from_generic_url(raw_url: url_lib::Url) -> Result { - // Map empty usernames to None. - let username = match raw_url.username() { - "" => None, - username => Some(username.to_owned()) - }; - - // Map empty passwords to None. - let password = match raw_url.password() { - Some(password) if !password.is_empty() => Some(password.to_owned()), - _ => None, - }; - - let port = raw_url.port_or_known_default().ok_or_else(|| format!("Unknown port for scheme: `{}`", raw_url.scheme()))?; - let host = raw_url.host().ok_or_else(|| "Valid host, because only data:, mailto: protocols does not have host.".to_owned())?.to_owned(); - let path = raw_url.path_segments().ok_or_else(|| "Valid path segments. 
In HTTP we won't get cannot-be-a-base URLs".to_owned())? - .map(|part| part.to_owned()).collect(); - let query = raw_url.query().map(|x| x.to_owned()); - - Ok(Url { - port: port, - host: host, - path: path, - query: query, - raw: raw_url, - username: username, - password: password, - }) - } -} - -#[cfg(test)] -mod test { - use super::Url; - - #[test] - fn test_default_port() { - assert_eq!(Url::parse("http://example.com/wow").unwrap().port, 80u16); - assert_eq!(Url::parse("https://example.com/wow").unwrap().port, 443u16); - } - - #[test] - fn test_explicit_port() { - assert_eq!(Url::parse("http://localhost:3097").unwrap().port, 3097u16); - } - - #[test] - fn test_empty_username() { - assert!(Url::parse("http://@example.com").unwrap().username.is_none()); - assert!(Url::parse("http://:password@example.com").unwrap().username.is_none()); - } - - #[test] - fn test_not_empty_username() { - let user = Url::parse("http://john:pass@example.com").unwrap().username; - assert_eq!(user.unwrap(), "john"); - - let user = Url::parse("http://john:@example.com").unwrap().username; - assert_eq!(user.unwrap(), "john"); - } - - #[test] - fn test_empty_password() { - assert!(Url::parse("http://michael@example.com").unwrap().password.is_none()); - assert!(Url::parse("http://:@example.com").unwrap().password.is_none()); - } - - #[test] - fn test_not_empty_password() { - let pass = Url::parse("http://michael:pass@example.com").unwrap().password; - assert_eq!(pass.unwrap(), "pass"); - - let pass = Url::parse("http://:pass@example.com").unwrap().password; - assert_eq!(pass.unwrap(), "pass"); - } -} diff --git a/dapps/src/web.rs b/dapps/src/web.rs index 5222f51b5..395ab6deb 100644 --- a/dapps/src/web.rs +++ b/dapps/src/web.rs @@ -17,26 +17,23 @@ //! 
Serving web-based content (proxying) use std::sync::Arc; -use fetch::{self, Fetch}; -use parity_reactor::Remote; use base32; -use hyper::{self, server, net, Next, Encoder, Decoder}; -use hyper::status::StatusCode; +use fetch::{self, Fetch}; +use hyper::{mime, StatusCode}; use apps; -use endpoint::{Endpoint, Handler, EndpointPath}; +use endpoint::{Endpoint, EndpointPath, Request, Response}; +use futures::future; use handlers::{ ContentFetcherHandler, ContentHandler, ContentValidator, ValidatorResponse, - StreamingHandler, extract_url, + StreamingHandler, }; -use url::Url; use {Embeddable, WebProxyTokens}; pub struct Web { embeddable_on: Embeddable, web_proxy_tokens: Arc, - remote: Remote, fetch: F, } @@ -44,92 +41,27 @@ impl Web { pub fn boxed( embeddable_on: Embeddable, web_proxy_tokens: Arc, - remote: Remote, fetch: F, ) -> Box { Box::new(Web { embeddable_on, web_proxy_tokens, - remote, fetch, }) } -} -impl Endpoint for Web { - fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box { - Box::new(WebHandler { - control: control, - state: State::Initial, - path: path, - remote: self.remote.clone(), - fetch: self.fetch.clone(), - web_proxy_tokens: self.web_proxy_tokens.clone(), - embeddable_on: self.embeddable_on.clone(), - }) - } -} - -struct WebInstaller { - embeddable_on: Embeddable, - referer: String, -} - -impl ContentValidator for WebInstaller { - type Error = String; - - fn validate_and_install(&self, response: fetch::Response) -> Result { - let status = StatusCode::from_u16(response.status().to_u16()); - let is_html = response.is_html(); - let mime = response.content_type().unwrap_or(mime!(Text/Html)); - let mut handler = StreamingHandler::new( - response, - status, - mime, - self.embeddable_on.clone(), - ); - if is_html { - handler.set_initial_content(&format!( - r#""#, - apps::UTILS_PATH, - apps::URL_REFERER, - apps::WEB_PATH, - &self.referer, - )); - } - Ok(ValidatorResponse::Streaming(handler)) - } -} - -enum State { - Initial, - 
Error(ContentHandler), - Fetching(ContentFetcherHandler), -} - -struct WebHandler { - control: hyper::Control, - state: State, - path: EndpointPath, - remote: Remote, - fetch: F, - web_proxy_tokens: Arc, - embeddable_on: Embeddable, -} - -impl WebHandler { - fn extract_target_url(&self, url: Option) -> Result> { - let token_and_url = self.path.app_params.get(0) + fn extract_target_url(&self, path: &EndpointPath) -> Result { + let token_and_url = path.app_params.get(0) .map(|encoded| encoded.replace('.', "")) .and_then(|encoded| base32::decode(base32::Alphabet::Crockford, &encoded.to_uppercase())) .and_then(|data| String::from_utf8(data).ok()) - .ok_or_else(|| State::Error(ContentHandler::error( + .ok_or_else(|| ContentHandler::error( StatusCode::BadRequest, "Invalid parameter", "Couldn't parse given parameter:", - self.path.app_params.get(0).map(String::as_str), + path.app_params.get(0).map(String::as_str), self.embeddable_on.clone() - )))?; + ))?; let mut token_it = token_and_url.split('+'); let token = token_it.next(); @@ -139,9 +71,9 @@ impl WebHandler { let domain = match token.and_then(|token| self.web_proxy_tokens.domain(token)) { Some(domain) => domain, _ => { - return Err(State::Error(ContentHandler::error( + return Err(ContentHandler::error( StatusCode::BadRequest, "Invalid Access Token", "Invalid or old web proxy access token supplied.", Some("Try refreshing the page."), self.embeddable_on.clone() - ))); + )); } }; @@ -149,95 +81,86 @@ impl WebHandler { let mut target_url = match target_url { Some(url) if url.starts_with("http://") || url.starts_with("https://") => url.to_owned(), _ => { - return Err(State::Error(ContentHandler::error( + return Err(ContentHandler::error( StatusCode::BadRequest, "Invalid Protocol", "Invalid protocol used.", None, self.embeddable_on.clone() - ))); + )); } }; if !target_url.starts_with(&*domain) { - return Err(State::Error(ContentHandler::error( + return Err(ContentHandler::error( StatusCode::BadRequest, "Invalid Domain", 
"Dapp attempted to access invalid domain.", Some(&target_url), self.embeddable_on.clone(), - ))); + )); } if !target_url.ends_with("/") { target_url = format!("{}/", target_url); } - // TODO [ToDr] Should just use `path.app_params` - let (path, query) = match (&url, self.path.using_dapps_domains) { - (&Some(ref url), true) => (&url.path[..], &url.query), - (&Some(ref url), false) => (&url.path[2..], &url.query), - _ => { - return Err(State::Error(ContentHandler::error( - StatusCode::BadRequest, "Invalid URL", "Couldn't parse URL", None, self.embeddable_on.clone() - ))); - } - }; + // Skip the token + let query = path.query.as_ref().map_or_else(String::new, |query| format!("?{}", query)); + let path = path.app_params[1..].join("/"); - let query = match *query { - Some(ref query) => format!("?{}", query), - None => "".into(), - }; - - Ok(format!("{}{}{}", target_url, path.join("/"), query)) + Ok(format!("{}{}{}", target_url, path, query)) } } -impl server::Handler for WebHandler { - fn on_request(&mut self, request: server::Request) -> Next { - let url = extract_url(&request); +impl Endpoint for Web { + fn respond(&self, path: EndpointPath, req: Request) -> Response { // First extract the URL (reject invalid URLs) - let target_url = match self.extract_target_url(url) { + let target_url = match self.extract_target_url(&path) { Ok(url) => url, - Err(error) => { - self.state = error; - return Next::write(); + Err(response) => { + return Box::new(future::ok(response.into())); } }; - let mut handler = ContentFetcherHandler::new( - target_url, - self.path.clone(), - self.control.clone(), + let token = path.app_params.get(0) + .expect("`target_url` is valid; app_params is not empty;qed") + .to_owned(); + + Box::new(ContentFetcherHandler::new( + req.method(), + &target_url, + path, WebInstaller { embeddable_on: self.embeddable_on.clone(), - referer: self.path.app_params.get(0) - .expect("`target_url` is valid; app_params is not empty;qed") - .to_owned(), + token, }, 
self.embeddable_on.clone(), - self.remote.clone(), self.fetch.clone(), - ); - let res = handler.on_request(request); - self.state = State::Fetching(handler); - - res - } - - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - match self.state { - State::Initial => Next::end(), - State::Error(ref mut handler) => handler.on_request_readable(decoder), - State::Fetching(ref mut handler) => handler.on_request_readable(decoder), - } - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - match self.state { - State::Initial => Next::end(), - State::Error(ref mut handler) => handler.on_response(res), - State::Fetching(ref mut handler) => handler.on_response(res), - } - } - - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - match self.state { - State::Initial => Next::end(), - State::Error(ref mut handler) => handler.on_response_writable(encoder), - State::Fetching(ref mut handler) => handler.on_response_writable(encoder), - } + )) } } + +struct WebInstaller { + embeddable_on: Embeddable, + token: String, +} + +impl ContentValidator for WebInstaller { + type Error = String; + + fn validate_and_install(self, response: fetch::Response) -> Result { + let status = response.status(); + let is_html = response.is_html(); + let mime = response.content_type().unwrap_or(mime::TEXT_HTML); + let mut handler = StreamingHandler::new( + response, + status, + mime, + self.embeddable_on, + ); + if is_html { + handler.set_initial_content(&format!( + r#""#, + apps::UTILS_PATH, + apps::URL_REFERER, + apps::WEB_PATH, + &self.token, + )); + } + Ok(ValidatorResponse::Streaming(handler)) + } +} + diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index d76c07dbe..4d7851b07 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -12,15 +12,12 @@ build = "build.rs" [dependencies] ansi_term = "0.9" -bit-set = "0.4" bloomchain = "0.1" bn = { git = "https://github.com/paritytech/bn" } byteorder = "1.0" clippy = { version = "0.0.103", 
optional = true} common-types = { path = "types" } crossbeam = "0.2.9" -env_logger = "0.4" -ethabi = "2.0" ethash = { path = "../ethash" } ethcore-bloom-journal = { path = "../util/bloom" } ethcore-bytes = { path = "../util/bytes" } @@ -45,9 +42,9 @@ heapsize = "0.4" hyper = { git = "https://github.com/paritytech/hyper", default-features = false } itertools = "0.5" lazy_static = "0.2" -linked-hash-map = "0.3.0" +linked-hash-map = "0.5" log = "0.3" -lru-cache = "0.1.0" +lru-cache = "0.1" native-contracts = { path = "native_contracts" } num = "0.1" num_cpus = "1.2" @@ -60,7 +57,6 @@ rlp = { path = "../util/rlp" } rlp_derive = { path = "../util/rlp_derive" } rust-crypto = "0.2.34" rustc-hex = "1.0" -semver = "0.6" stats = { path = "../util/stats" } time = "0.1" transient-hashmap = "0.4" diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index 8de7555d2..458947313 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -46,10 +46,12 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result { Ok(format!(r##" use byteorder::{{BigEndian, ByteOrder}}; -use futures::{{future, Future, IntoFuture, BoxFuture}}; +use futures::{{future, Future, IntoFuture}}; use ethabi::{{Contract, Interface, Token, Event}}; use bigint; +type BoxFuture = Box + Send>; + /// Generated Rust bindings to an Ethereum contract. 
#[derive(Clone, Debug)] pub struct {name} {{ @@ -118,15 +120,14 @@ pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, let call_future = match function.encode_call({to_tokens}) {{ Ok(call_data) => (call)(call_addr, call_data), - Err(e) => return future::err(format!("Error encoding call: {{:?}}", e)).boxed(), + Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))), }}; - call_future + Box::new(call_future .into_future() .and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e))) .map(Vec::into_iter) - .and_then(|mut outputs| {decode_outputs}) - .boxed() + .and_then(|mut outputs| {decode_outputs})) }} "##, abi_name = name, diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index 9e7e693bb..3f53b0363 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -164,8 +164,6 @@ mod tests { #[test] fn uses_current_set() { - let _ = ::env_logger::init(); - let tap = Arc::new(AccountProvider::transient_provider()); let s0: Secret = keccak("0").into(); let v0 = tap.insert_account(s0.clone(), "").unwrap(); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 3c8c0714f..5faf5bf82 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -71,15 +71,12 @@ //! cargo build --release //! 
``` -extern crate bit_set; extern crate bloomchain; extern crate bn; extern crate byteorder; extern crate crossbeam; extern crate common_types as types; extern crate crypto; -extern crate env_logger; -extern crate ethabi; extern crate ethash; extern crate ethcore_bloom_journal as bloom_journal; extern crate ethcore_devtools as devtools; @@ -119,7 +116,6 @@ extern crate unexpected; #[macro_use] extern crate rlp_derive; extern crate rustc_hex; -extern crate semver; extern crate stats; extern crate time; extern crate transient_hashmap; diff --git a/hash-fetch/Cargo.toml b/hash-fetch/Cargo.toml index a063c65ff..eca590a23 100644 --- a/hash-fetch/Cargo.toml +++ b/hash-fetch/Cargo.toml @@ -7,14 +7,12 @@ version = "1.8.0" authors = ["Parity Technologies "] [dependencies] -ethabi = "2.0" futures = "0.1" log = "0.3" -mime = "0.2" -mime_guess = "1.6.1" +mime = "0.3" +mime_guess = "2.0.0-alpha.2" rand = "0.3" rustc-hex = "1.0" -parking_lot = "0.4" fetch = { path = "../util/fetch" } ethcore-util = { path = "../util" } ethcore-bigint = { path = "../util/bigint" } @@ -22,3 +20,7 @@ ethcore-bytes = { path = "../util/bytes" } parity-reactor = { path = "../util/reactor" } native-contracts = { path = "../ethcore/native_contracts" } hash = { path = "../util/hash" } + +[dev-dependencies] +ethabi = "2.0" +parking_lot = "0.4" diff --git a/hash-fetch/src/lib.rs b/hash-fetch/src/lib.rs index 3279f868f..858a04ea5 100644 --- a/hash-fetch/src/lib.rs +++ b/hash-fetch/src/lib.rs @@ -20,24 +20,26 @@ #[macro_use] extern crate log; -#[macro_use] -extern crate mime; -extern crate ethabi; extern crate ethcore_util as util; extern crate ethcore_bigint as bigint; extern crate ethcore_bytes as bytes; extern crate futures; +extern crate hash; +extern crate mime; extern crate mime_guess; extern crate native_contracts; extern crate parity_reactor; -extern crate parking_lot; extern crate rand; extern crate rustc_hex; -extern crate hash; pub extern crate fetch; +#[cfg(test)] +extern crate parking_lot; 
+#[cfg(test)] +extern crate ethabi; + mod client; pub mod urlhint; diff --git a/hash-fetch/src/urlhint.rs b/hash-fetch/src/urlhint.rs index 95a16511b..e19860e32 100644 --- a/hash-fetch/src/urlhint.rs +++ b/hash-fetch/src/urlhint.rs @@ -18,15 +18,19 @@ use std::sync::Arc; use rustc_hex::ToHex; -use mime::Mime; +use mime::{self, Mime}; use mime_guess; use hash::keccak; -use futures::{future, BoxFuture, Future}; +use futures::{future, Future}; use native_contracts::{Registry, Urlhint}; use util::Address; use bytes::Bytes; +/// Boxed future that can be shared between threads. +/// TODO [ToDr] Use concrete types! +pub type BoxFuture = Box + Send>; + const COMMIT_LEN: usize = 20; /// RAW Contract interface. @@ -127,7 +131,7 @@ fn decode_urlhint_output(output: (String, ::bigint::hash::H160, Address)) -> Opt let commit = GithubApp::commit(&commit); if commit == Some(Default::default()) { - let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime!(Application/_)); + let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime::APPLICATION_JSON); return Some(URLHintResult::Content(Content { url: account_slash_repo, mime: mime, @@ -158,7 +162,8 @@ impl URLHint for URLHintContract { let do_call = |_, data| { let addr = match self.client.registrar() { Ok(addr) => addr, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)) + as BoxFuture, _>, }; self.client.call(addr, data) @@ -166,7 +171,7 @@ impl URLHint for URLHintContract { let urlhint = self.urlhint.clone(); let client = self.client.clone(); - self.registrar.get_address(do_call, keccak("githubhint"), "A".into()) + Box::new(self.registrar.get_address(do_call, keccak("githubhint"), "A".into()) .map(|addr| if addr == Address::default() { None } else { Some(addr) }) .and_then(move |address| { let mut fixed_id = [0; 32]; @@ -180,7 +185,7 @@ impl URLHint for URLHintContract { Either::B(urlhint.entries(do_call, ::bigint::hash::H256(fixed_id)).map(decode_urlhint_output)) } } - }).boxed() + 
})) } } @@ -213,7 +218,7 @@ pub mod tests { use std::str::FromStr; use rustc_hex::FromHex; - use futures::{BoxFuture, Future, IntoFuture}; + use futures::{Future, IntoFuture}; use super::*; use super::guess_mime_type; @@ -251,7 +256,7 @@ pub mod tests { fn call(&self, address: Address, data: Bytes) -> BoxFuture { self.calls.lock().push((address.to_hex(), data.to_hex())); let res = self.responses.lock().remove(0); - res.into_future().boxed() + Box::new(res.into_future()) } } @@ -326,7 +331,7 @@ pub mod tests { // then assert_eq!(res, Some(URLHintResult::Content(Content { url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(), - mime: mime!(Image/Png), + mime: mime::IMAGE_PNG, owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), }))) } @@ -358,9 +363,9 @@ pub mod tests { assert_eq!(guess_mime_type(url1), None); - assert_eq!(guess_mime_type(url2), Some(mime!(Image/Png))); - assert_eq!(guess_mime_type(url3), Some(mime!(Image/Png))); - assert_eq!(guess_mime_type(url4), Some(mime!(Image/Jpeg))); - assert_eq!(guess_mime_type(url5), Some(mime!(Image/Png))); + assert_eq!(guess_mime_type(url2), Some(mime::IMAGE_PNG)); + assert_eq!(guess_mime_type(url3), Some(mime::IMAGE_PNG)); + assert_eq!(guess_mime_type(url4), Some(mime::IMAGE_JPEG)); + assert_eq!(guess_mime_type(url5), Some(mime::IMAGE_PNG)); } } diff --git a/hw/src/ledger.rs b/hw/src/ledger.rs index b7c01a049..5fcac3e55 100644 --- a/hw/src/ledger.rs +++ b/hw/src/ledger.rs @@ -235,7 +235,7 @@ impl Manager { where F: Fn() -> Result { let mut err = Error::KeyNotFound; - /// Try to open device a few times. + // Try to open device a few times. 
for _ in 0..10 { match f() { Ok(handle) => return Ok(handle), diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 89048081a..db76ddaa9 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -10,8 +10,9 @@ ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } ethcore-bigint = { path = "../util/bigint" } ethcore-bytes = { path = "../util/bytes" } -jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } rlp = { path = "../util/rlp" } -mime = "0.2" cid = "0.2" multihash = "0.6" +unicase = "2.0" diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 6a5f93092..ef6184b1b 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -14,43 +14,40 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#[macro_use] -extern crate mime; extern crate multihash; extern crate cid; +extern crate unicase; extern crate rlp; extern crate ethcore; extern crate ethcore_util as util; extern crate ethcore_bigint as bigint; extern crate ethcore_bytes as bytes; +extern crate jsonrpc_core as core; extern crate jsonrpc_http_server as http; pub mod error; mod route; -use std::io::Write; -use std::sync::Arc; +use std::thread; +use std::sync::{mpsc, Arc}; use std::net::{SocketAddr, IpAddr}; + +use core::futures::future::{self, FutureResult}; +use core::futures::{self, Future}; +use ethcore::client::BlockChainClient; +use http::hyper::header::{self, Vary, ContentType}; +use http::hyper::{Method, StatusCode}; +use http::hyper::{self, server}; +use unicase::Ascii; + use error::ServerError; use route::Out; -use http::hyper::server::{Handler, Request, Response}; -use http::hyper::net::HttpStream; -use http::hyper::header::{self, Vary, ContentLength, ContentType}; -use http::hyper::{Next, Encoder, Decoder, 
Method, RequestUri, StatusCode}; -use ethcore::client::BlockChainClient; -pub use http::hyper::server::Listening; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler pub struct IpfsHandler { - /// Response to send out - out: Out, - /// How many bytes from the response have been written - out_progress: usize, - /// CORS response header - cors_header: Option, /// Allowed CORS domains cors_domains: Option>, /// Hostnames allowed in the `Host` request header @@ -66,124 +63,68 @@ impl IpfsHandler { pub fn new(cors: DomainsValidation, hosts: DomainsValidation, client: Arc) -> Self { IpfsHandler { - out: Out::Bad("Invalid Request"), - out_progress: 0, - cors_header: None, cors_domains: cors.into(), allowed_hosts: hosts.into(), client: client, } } -} - -/// Implement Hyper's HTTP handler -impl Handler for IpfsHandler { - fn on_request(&mut self, req: Request) -> Next { + pub fn on_request(&self, req: hyper::Request) -> (Option, Out) { match *req.method() { Method::Get | Method::Post => {}, - _ => return Next::write() + _ => return (None, Out::Bad("Invalid Request")), } if !http::is_host_allowed(&req, &self.allowed_hosts) { - self.out = Out::Bad("Disallowed Host header"); - - return Next::write(); + return (None, Out::Bad("Disallowed Host header")); } let cors_header = http::cors_header(&req, &self.cors_domains); if cors_header == http::CorsHeader::Invalid { - self.out = Out::Bad("Disallowed Origin header"); - - return Next::write(); - } - self.cors_header = cors_header.into(); - - let (path, query) = match *req.uri() { - RequestUri::AbsolutePath { ref path, ref query } => (path, query.as_ref().map(AsRef::as_ref)), - _ => return Next::write(), - }; - - self.out = self.route(path, query); - - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut Response) -> Next { - use Out::*; - - match self.out { - OctetStream(ref bytes) => { - use 
mime::{Mime, TopLevel, SubLevel}; - - // `OctetStream` is not a valid variant, so need to construct - // the type manually. - let content_type = Mime( - TopLevel::Application, - SubLevel::Ext("octet-stream".into()), - vec![] - ); - - res.headers_mut().set(ContentLength(bytes.len() as u64)); - res.headers_mut().set(ContentType(content_type)); - - }, - NotFound(reason) => { - res.set_status(StatusCode::NotFound); - - res.headers_mut().set(ContentLength(reason.len() as u64)); - res.headers_mut().set(ContentType(mime!(Text/Plain))); - }, - Bad(reason) => { - res.set_status(StatusCode::BadRequest); - - res.headers_mut().set(ContentLength(reason.len() as u64)); - res.headers_mut().set(ContentType(mime!(Text/Plain))); - } + return (None, Out::Bad("Disallowed Origin header")); } - if let Some(cors_header) = self.cors_header.take() { - res.headers_mut().set(cors_header); - res.headers_mut().set(Vary::Items(vec!["Origin".into()])); - } - - Next::write() - } - - fn on_response_writable(&mut self, transport: &mut Encoder) -> Next { - use Out::*; - - // Get the data to write as a byte slice - let data = match self.out { - OctetStream(ref bytes) => &bytes, - NotFound(reason) | Bad(reason) => reason.as_bytes(), - }; - - write_chunk(transport, &mut self.out_progress, data) + let path = req.uri().path(); + let query = req.uri().query(); + return (cors_header.into(), self.route(path, query)); } } -/// Attempt to write entire `data` from current `progress` -fn write_chunk(transport: &mut W, progress: &mut usize, data: &[u8]) -> Next { - // Skip any bytes that have already been written - let chunk = &data[*progress..]; +impl server::Service for IpfsHandler { + type Request = hyper::Request; + type Response = hyper::Response; + type Error = hyper::Error; + type Future = FutureResult; - // Write an get the amount of bytes written. End the connection in case of an error. 
- let written = match transport.write(chunk) { - Ok(written) => written, - Err(_) => return Next::end(), - }; + fn call(&self, request: Self::Request) -> Self::Future { + let (cors_header, out) = self.on_request(request); - *progress += written; + let mut res = match out { + Out::OctetStream(bytes) => { + hyper::Response::new() + .with_status(StatusCode::Ok) + .with_header(ContentType::octet_stream()) + .with_body(bytes) + }, + Out::NotFound(reason) => { + hyper::Response::new() + .with_status(StatusCode::NotFound) + .with_header(ContentType::plaintext()) + .with_body(reason) + }, + Out::Bad(reason) => { + hyper::Response::new() + .with_status(StatusCode::BadRequest) + .with_header(ContentType::plaintext()) + .with_body(reason) + } + }; - // Close the connection if the entire remaining chunk has been written - if written < chunk.len() { - Next::write() - } else { - Next::end() + if let Some(cors_header) = cors_header { + res.headers_mut().set(cors_header); + res.headers_mut().set(Vary::Items(vec![Ascii::new("Origin".into())])); + } + + future::ok(res) } } @@ -197,6 +138,19 @@ fn include_current_interface(mut hosts: Vec, interface: String, port: u16) hosts } +#[derive(Debug)] +pub struct Listening { + close: Option>, + thread: Option>, +} + +impl Drop for Listening { + fn drop(&mut self) { + self.close.take().unwrap().send(()).unwrap(); + let _ = self.thread.take().unwrap().join(); + } +} + pub fn start_server( port: u16, interface: String, @@ -210,67 +164,31 @@ pub fn start_server( let hosts: Option> = hosts.into(); let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); - Ok( - http::hyper::Server::http(&addr)? 
- .handle(move |_| IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())) - .map(|(listening, srv)| { + let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>(); + let (tx, rx) = mpsc::sync_channel(1); + let thread = thread::spawn(move || { + let send = |res| tx.send(res).expect("rx end is never dropped; qed"); + let server = match server::Http::new().bind(&addr, move || { + Ok(IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())) + }) { + Ok(server) => { + send(Ok(())); + server + }, + Err(err) => { + send(Err(err)); + return; + } + }; - ::std::thread::spawn(move || { - srv.run(); - }); + let _ = server.run_until(shutdown_signal.map_err(|_| {})); + }); - listening - })? - ) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn write_chunk_to_vec() { - let mut transport = Vec::new(); - let mut progress = 0; - - let _ = write_chunk(&mut transport, &mut progress, b"foobar"); - - assert_eq!(b"foobar".to_vec(), transport); - assert_eq!(6, progress); - } - - #[test] - fn write_chunk_to_vec_part() { - let mut transport = Vec::new(); - let mut progress = 3; - - let _ = write_chunk(&mut transport, &mut progress, b"foobar"); - - assert_eq!(b"bar".to_vec(), transport); - assert_eq!(6, progress); - } - - #[test] - fn write_chunk_to_array() { - use std::io::Cursor; - - let mut buf = [0u8; 3]; - let mut progress = 0; - - { - let mut transport: Cursor<&mut [u8]> = Cursor::new(&mut buf); - let _ = write_chunk(&mut transport, &mut progress, b"foobar"); - } - - assert_eq!(*b"foo", buf); - assert_eq!(3, progress); - - { - let mut transport: Cursor<&mut [u8]> = Cursor::new(&mut buf); - let _ = write_chunk(&mut transport, &mut progress, b"foobar"); - } - - assert_eq!(*b"bar", buf); - assert_eq!(6, progress); - } + // Wait for server to start successfully. 
+ rx.recv().expect("tx end is never dropped; qed")?; + + Ok(Listening { + close: close.into(), + thread: thread.into(), + }) } diff --git a/js/Cargo.precompiled.toml b/js/Cargo.precompiled.toml index 87369005c..331b98cf1 100644 --- a/js/Cargo.precompiled.toml +++ b/js/Cargo.precompiled.toml @@ -12,8 +12,8 @@ use-precompiled-js = ["parity-dapps-glue/use-precompiled-js"] with-syntex = ["parity-dapps-glue/with-syntex"] [build-dependencies] -parity-dapps-glue = "1.7" +parity-dapps-glue = "1.8" [dependencies] -parity-dapps-glue = "1.7" +parity-dapps-glue = "1.8" diff --git a/js/Cargo.toml b/js/Cargo.toml index cf7ba957b..1ff166cca 100644 --- a/js/Cargo.toml +++ b/js/Cargo.toml @@ -11,8 +11,8 @@ default = ["with-syntex"] with-syntex = ["parity-dapps-glue/with-syntex"] [build-dependencies] -parity-dapps-glue = "1.7" +parity-dapps-glue = "1.8" [dependencies] -parity-dapps-glue = "1.7" +parity-dapps-glue = "1.8" diff --git a/logger/src/lib.rs b/logger/src/lib.rs index 7b57f383f..a2cf6d2a0 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -70,6 +70,7 @@ pub fn setup_log(config: &Config) -> Result, String> { // Disable info logging by default for some modules: builder.filter(Some("ws"), LogLevelFilter::Warn); builder.filter(Some("reqwest"), LogLevelFilter::Warn); + builder.filter(Some("hyper"), LogLevelFilter::Warn); builder.filter(Some("rustls"), LogLevelFilter::Warn); // Enable info for others. builder.filter(None, LogLevelFilter::Info); diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 4c0a05fd9..7e14eab6d 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -480,7 +480,7 @@ usage! { ARG arg_jsonrpc_server_threads: (Option) = None, or |c: &Config| otry!(c.rpc).server_threads, "--jsonrpc-server-threads=[NUM]", - "Enables experimental faster implementation of JSON-RPC server. 
Requires Dapps server to be disabled using --no-dapps.", + "Enables multiple threads handling incoming connections for HTTP JSON-RPC server.", ["API and console options – WebSockets"] FLAG flag_no_ws: (bool) = false, or |c: &Config| otry!(c.websockets).disable.clone(), diff --git a/parity/configuration.rs b/parity/configuration.rs index 1c751ab64..007d2d911 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -141,16 +141,11 @@ impl Configuration { } let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; - let mut dapps_conf = self.dapps_config(); + let dapps_conf = self.dapps_config(); let ipfs_conf = self.ipfs_config(); let secretstore_conf = self.secretstore_config()?; let format = self.format()?; - if self.args.arg_jsonrpc_server_threads.is_some() && dapps_conf.enabled { - dapps_conf.enabled = false; - writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr."); - } - let cmd = if self.args.flag_version { Cmd::Version } else if self.args.cmd_signer { @@ -867,9 +862,8 @@ impl Configuration { hosts: self.rpc_hosts(), cors: self.rpc_cors(), server_threads: match self.args.arg_jsonrpc_server_threads { - Some(threads) if threads > 0 => Some(threads), - None => None, - _ => return Err("--jsonrpc-server-threads number needs to be positive.".into()), + Some(threads) if threads > 0 => threads, + _ => 1, }, processing_threads: self.args.arg_jsonrpc_threads, }; diff --git a/parity/dapps.rs b/parity/dapps.rs index 24db4730d..364f563c5 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -21,7 +21,8 @@ use dir::default_data_path; use ethcore::client::{Client, BlockChainClient, BlockId}; use ethcore::transaction::{Transaction, Action}; use ethsync::LightSync; -use futures::{future, IntoFuture, Future, BoxFuture}; +use futures::{future, IntoFuture, Future}; 
+use jsonrpc_core::BoxFuture; use hash_fetch::fetch::Client as FetchClient; use hash_fetch::urlhint::ContractClient; use helpers::replace_home; @@ -30,7 +31,6 @@ use light::on_demand::{self, OnDemand}; use node_health::{SyncStatus, NodeHealth}; use rpc; use rpc_apis::SignerService; -use parity_reactor; use util::Address; use bytes::Bytes; @@ -81,9 +81,8 @@ impl ContractClient for FullRegistrar { } fn call(&self, address: Address, data: Bytes) -> BoxFuture { - self.client.call_contract(BlockId::Latest, address, data) - .into_future() - .boxed() + Box::new(self.client.call_contract(BlockId::Latest, address, data) + .into_future()) } } @@ -113,7 +112,7 @@ impl ContractClient for LightRegistrar { let env_info = match env_info { Ok(x) => x, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)), }; let maybe_future = self.sync.with_context(move |ctx| { @@ -140,8 +139,8 @@ impl ContractClient for LightRegistrar { }); match maybe_future { - Some(fut) => fut.boxed(), - None => future::err("cannot query registry: network disabled".into()).boxed(), + Some(fut) => Box::new(fut), + None => Box::new(future::err("cannot query registry: network disabled".into())), } } } @@ -153,7 +152,6 @@ pub struct Dependencies { pub node_health: NodeHealth, pub sync_status: Arc, pub contract_client: Arc, - pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, pub ui_address: Option<(String, u16)>, @@ -235,7 +233,6 @@ mod server { use rpc_apis; use parity_dapps; - use parity_reactor; pub use parity_dapps::Middleware; @@ -248,12 +245,11 @@ mod server { extra_script_src: Vec<(String, u16)>, ) -> Result { let signer = deps.signer; - let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token)); Ok(parity_dapps::Middleware::dapps( + deps.fetch.pool(), deps.node_health, - parity_remote, deps.ui_address, extra_embed_on, extra_script_src, @@ 
-271,10 +267,9 @@ mod server { deps: Dependencies, dapps_domain: &str, ) -> Result { - let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); Ok(parity_dapps::Middleware::ui( + deps.fetch.pool(), deps.node_health, - parity_remote, dapps_domain, deps.contract_client, deps.sync_status, diff --git a/parity/light_helpers/epoch_fetch.rs b/parity/light_helpers/epoch_fetch.rs index cb570d069..77a623016 100644 --- a/parity/light_helpers/epoch_fetch.rs +++ b/parity/light_helpers/epoch_fetch.rs @@ -23,7 +23,8 @@ use ethcore::machine::EthereumMachine; use ethcore::receipt::Receipt; use ethsync::LightSync; -use futures::{future, Future, BoxFuture}; +use futures::{future, Future}; +use futures::future::Either; use light::client::fetch::ChainDataFetcher; use light::on_demand::{request, OnDemand}; @@ -33,6 +34,8 @@ use bigint::hash::H256; const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed"; +type BoxFuture = Box>; + /// Allows on-demand fetch of data useful for the light client. pub struct EpochFetch { /// A handle to the sync service. 
@@ -45,7 +48,7 @@ impl EpochFetch { fn request(&self, req: T) -> BoxFuture where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static { - match self.sync.read().upgrade() { + Box::new(match self.sync.read().upgrade() { Some(sync) => { let on_demand = &self.on_demand; let maybe_future = sync.with_context(move |ctx| { @@ -53,12 +56,12 @@ impl EpochFetch { }); match maybe_future { - Some(x) => x.map_err(|_| "Request canceled").boxed(), - None => future::err("Unable to access network.").boxed(), + Some(x) => Either::A(x.map_err(|_| "Request canceled")), + None => Either::B(future::err("Unable to access network.")), } } - None => future::err("Unable to access network").boxed(), - } + None => Either::B(future::err("Unable to access network")), + }) } } diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index e024e70a5..6ac40baf8 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -94,11 +94,11 @@ impl IoHandler for QueueCull }); match maybe_fetching { - Some(fut) => fut.boxed(), + Some(fut) => future::Either::A(fut), None => { debug!(target: "cull", "Unable to acquire network context; qed"); - future::ok(()).boxed() - } + future::Either::B(future::ok(())) + }, } }, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) } diff --git a/parity/rpc.rs b/parity/rpc.rs index 004e12a28..12667d3ae 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -42,7 +42,7 @@ pub struct HttpConfiguration { pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, - pub server_threads: Option, + pub server_threads: usize, pub processing_threads: usize, } @@ -61,7 +61,7 @@ impl Default for HttpConfiguration { apis: ApiSet::UnsafeContext, cors: None, hosts: Some(Vec::new()), - server_threads: None, + server_threads: 1, processing_threads: 0, } } @@ -100,7 +100,7 @@ impl From for HttpConfiguration { apis: rpc_apis::ApiSet::UnsafeContext, cors: None, hosts: conf.hosts, - server_threads: None, + server_threads: 1, 
processing_threads: 0, } } @@ -278,13 +278,8 @@ pub fn new_http( handler, remote, rpc::RpcExtractor, - match (conf.server_threads, middleware) { - (Some(threads), None) => rpc::HttpSettings::Threads(threads), - (None, middleware) => rpc::HttpSettings::Dapps(middleware), - (Some(_), Some(_)) => { - return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into()) - }, - } + middleware, + conf.server_threads, ); match start_result { diff --git a/parity/run.rs b/parity/run.rs index 7a50054f3..4b971f837 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -327,7 +327,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> sync_status, node_health, contract_client: contract_client, - remote: event_loop.raw_remote(), fetch: fetch.clone(), signer: signer_service.clone(), ui_address: cmd.ui_conf.redirection_address(), @@ -721,7 +720,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R sync_status, node_health, contract_client: contract_client, - remote: event_loop.raw_remote(), fetch: fetch.clone(), signer: signer_service.clone(), ui_address: cmd.ui_conf.redirection_address(), diff --git a/price-info/src/lib.rs b/price-info/src/lib.rs index ba5719f40..7036cfefc 100644 --- a/price-info/src/lib.rs +++ b/price-info/src/lib.rs @@ -92,7 +92,7 @@ impl Client { /// Gets the current ETH price and calls `set_price` with the result. 
pub fn get(&self, set_price: G) { - self.fetch.forget(self.fetch.fetch(&self.api_endpoint) + self.fetch.process_and_forget(self.fetch.fetch(&self.api_endpoint) .map_err(|err| Error::Fetch(err)) .and_then(move |mut response| { if !response.is_success() { @@ -156,10 +156,11 @@ mod test { } // this guarantees that the calls to price_info::Client::get will block for execution - fn forget(&self, f: F) where + fn process_and_forget(&self, f: F) where F: Future + Send + 'static, I: Send + 'static, - E: Send + 'static { + E: Send + 'static, + { let _ = f.wait(); } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 377a75b21..0b393f16d 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -10,7 +10,6 @@ authors = ["Parity Technologies "] [dependencies] ansi_term = "0.9" cid = "0.2" -futures = "0.1" futures-cpupool = "0.1" log = "0.3" multihash ="0.6" @@ -28,13 +27,12 @@ tokio-timer = "0.1" transient-hashmap = "0.4" itertools = "0.5" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-minihttp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = 
"parity-1.8" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } diff --git a/rpc/src/http_common.rs b/rpc/src/http_common.rs index edbb16140..72af6e469 100644 --- a/rpc/src/http_common.rs +++ b/rpc/src/http_common.rs @@ -19,7 +19,6 @@ use jsonrpc_core; use http; use hyper; -use minihttp; /// HTTP RPC server impl-independent metadata extractor pub trait HttpMetaExtractor: Send + Sync + 'static { @@ -29,24 +28,22 @@ pub trait HttpMetaExtractor: Send + Sync + 'static { fn read_metadata(&self, origin: Option, user_agent: Option, dapps_origin: Option) -> Self::Metadata; } -pub struct HyperMetaExtractor { +pub struct MetaExtractor { extractor: T, } -impl HyperMetaExtractor { +impl MetaExtractor { pub fn new(extractor: T) -> Self { - HyperMetaExtractor { - extractor: extractor, - } + MetaExtractor { extractor } } } -impl http::MetaExtractor for HyperMetaExtractor where +impl http::MetaExtractor for MetaExtractor where T: HttpMetaExtractor, M: jsonrpc_core::Metadata, { - fn read_metadata(&self, req: &hyper::server::Request) -> M { - let as_string = |header: Option<&http::request_response::header::Raw>| header + fn read_metadata(&self, req: &hyper::server::Request) -> M { + let as_string = |header: Option<&hyper::header::Raw>| header .and_then(|raw| raw.one()) .map(|raw| String::from_utf8_lossy(raw).into_owned()); @@ -56,28 +53,3 @@ impl http::MetaExtractor for HyperMetaExtractor where self.extractor.read_metadata(origin, user_agent, dapps_origin) } } - -pub struct MiniMetaExtractor { - extractor: T, -} - -impl MiniMetaExtractor { - pub fn new(extractor: T) -> Self { - MiniMetaExtractor { - extractor: extractor, - } - } -} - -impl minihttp::MetaExtractor for MiniMetaExtractor where - T: HttpMetaExtractor, - M: jsonrpc_core::Metadata, -{ - fn read_metadata(&self, 
req: &minihttp::Req) -> M { - let origin = req.header("origin").map(|h| h.to_owned()); - let user_agent = req.header("user-agent").map(|h| h.to_owned()); - let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned()); - - self.extractor.read_metadata(origin, user_agent, dapps_origin) - } -} diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index caf998334..c37ebad1f 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -23,7 +23,6 @@ extern crate ansi_term; extern crate cid; extern crate crypto as rust_crypto; -extern crate futures; extern crate futures_cpupool; extern crate itertools; extern crate multihash; @@ -41,7 +40,6 @@ extern crate transient_hashmap; extern crate jsonrpc_core; extern crate jsonrpc_http_server as http; extern crate jsonrpc_ipc_server as ipc; -extern crate jsonrpc_minihttp_server as minihttp; extern crate jsonrpc_pubsub; extern crate ethash; @@ -109,22 +107,8 @@ use std::net::SocketAddr; use http::tokio_core; /// RPC HTTP Server instance -pub enum HttpServer { - /// Fast MiniHTTP variant - Mini(minihttp::Server), - /// Hyper variant - Hyper(http::Server), -} +pub type HttpServer = http::Server; -impl HttpServer { - /// Returns current listening address. - pub fn address(&self) -> &SocketAddr { - match *self { - HttpServer::Mini(ref s) => s.address(), - HttpServer::Hyper(ref s) => &s.addrs()[0], - } - } -} /// RPC HTTP Server error #[derive(Debug)] @@ -145,23 +129,6 @@ impl From for HttpServerError { } } -impl From for HttpServerError { - fn from(e: minihttp::Error) -> Self { - use self::HttpServerError::*; - match e { - minihttp::Error::Io(io) => Io(io), - } - } -} - -/// HTTP server implementation-specific settings. -pub enum HttpSettings { - /// Enable fast minihttp server with given number of threads. - Threads(usize), - /// Enable standard server with optional dapps middleware. - Dapps(Option), -} - /// Start http server asynchronously and returns result with `Server` handle on success or an error. 
pub fn start_http( addr: &SocketAddr, @@ -170,7 +137,8 @@ pub fn start_http( handler: H, remote: tokio_core::reactor::Remote, extractor: T, - settings: HttpSettings, + middleware: Option, + threads: usize, ) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, @@ -178,30 +146,18 @@ pub fn start_http( T: HttpMetaExtractor, R: RequestMiddleware, { - Ok(match settings { - HttpSettings::Dapps(middleware) => { - let mut builder = http::ServerBuilder::new(handler) - .event_loop_remote(remote) - .meta_extractor(http_common::HyperMetaExtractor::new(extractor)) - .cors(cors_domains.into()) - .allowed_hosts(allowed_hosts.into()); + let mut builder = http::ServerBuilder::new(handler) + .threads(threads) + .event_loop_remote(remote) + .meta_extractor(http_common::MetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()); - if let Some(dapps) = middleware { - builder = builder.request_middleware(dapps) - } - builder.start_http(addr) - .map(HttpServer::Hyper)? - }, - HttpSettings::Threads(threads) => { - minihttp::ServerBuilder::new(handler) - .threads(threads) - .meta_extractor(http_common::MiniMetaExtractor::new(extractor)) - .cors(cors_domains.into()) - .allowed_hosts(allowed_hosts.into()) - .start_http(addr) - .map(HttpServer::Mini)? - }, - }) + if let Some(dapps) = middleware { + builder = builder.request_middleware(dapps) + } + + Ok(builder.start_http(addr)?) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. 
diff --git a/rpc/src/tests/rpc.rs b/rpc/src/tests/rpc.rs index 7bd156cf5..6e2900c8b 100644 --- a/rpc/src/tests/rpc.rs +++ b/rpc/src/tests/rpc.rs @@ -18,7 +18,7 @@ use devtools::http_client; use jsonrpc_core::MetaIoHandler; use http::{self, hyper}; -use {HttpSettings, HttpServer}; +use {HttpServer}; use tests::helpers::Server; use v1::{extractors, Metadata}; @@ -33,11 +33,13 @@ fn serve(handler: Option>) -> Server { handler, remote, extractors::RpcExtractor, - HttpSettings::Dapps(Some(|_req: &hyper::server::Request, _control: &hyper::Control| { + Some(|request: hyper::Request| { http::RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: false + should_continue_on_invalid_cors: false, + request, } - })), + }), + 1, ).unwrap()) } @@ -49,14 +51,13 @@ fn request(server: Server, request: &str) -> http_client::Response { #[cfg(test)] mod testsing { use jsonrpc_core::{MetaIoHandler, Value}; - use jsonrpc_core::futures::{Future, future}; use v1::Metadata; use super::{request, Server}; fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) { let mut io = MetaIoHandler::default(); io.add_method_with_meta("hello", |_, meta: Metadata| { - future::ok(Value::String(format!("{}", meta.origin))).boxed() + Ok(Value::String(format!("{}", meta.origin))) }); let server = super::serve(Some(io)); let address = server.server.address().to_owned(); diff --git a/rpc/src/v1/extractors.rs b/rpc/src/v1/extractors.rs index 7685b110e..a24722900 100644 --- a/rpc/src/v1/extractors.rs +++ b/rpc/src/v1/extractors.rs @@ -236,7 +236,7 @@ impl> core::Middleware for WsDispatcher< if use_full { A(self.full_handler.handle_rpc_request(request, meta)) } else { - B(process(request, meta).boxed()) + B(Box::new(process(request, meta))) } } } diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index c616a70e1..0da257035 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -20,7 +20,6 @@ use std::fmt::Debug; use std::ops::Deref; use 
std::sync::Arc; -use futures::{future, Future, BoxFuture}; use light::cache::Cache as LightDataCache; use light::client::LightChainClient; use light::on_demand::{request, OnDemand}; @@ -43,7 +42,9 @@ use ethcore::transaction::{Action, SignedTransaction, PendingTransaction, Transa use ethcore::account_provider::AccountProvider; use crypto::DEFAULT_MAC; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::futures::future::Either; use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload}; use v1::types::{ H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes, @@ -120,7 +121,7 @@ impl Dispatcher for FullDispatcher request.nonce, true => Some(Self::fill_nonce(request.nonce, &from, &miner, &client)), }; - future::ok(FilledTransactionRequest { + Box::new(future::ok(FilledTransactionRequest { from: from, used_default_from: request.from.is_none(), to: request.to, @@ -130,7 +131,7 @@ impl Dispatcher for FullDispatcher, filled: FilledTransactionRequest, password: SignWith) @@ -139,7 +140,7 @@ impl Dispatcher for FullDispatcher Dispatcher for FullDispatcher Result { @@ -182,7 +183,7 @@ pub fn fetch_gas_price_corpus( const GAS_PRICE_SAMPLE_SIZE: usize = 100; if let Some(cached) = { cache.lock().gas_price_corpus() } { - return future::ok(cached).boxed() + return Box::new(future::ok(cached)) } let cache = cache.clone(); @@ -217,8 +218,8 @@ pub fn fetch_gas_price_corpus( }); match eventual_corpus { - Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), - None => future::err(errors::network_disabled()).boxed(), + Some(corp) => Box::new(corp.map_err(|_| errors::no_light_peers())), + None => Box::new(future::err(errors::network_disabled())), } } @@ -284,7 +285,7 @@ impl LightDispatcher { // fast path where we don't go to network; nonce provided or can be gotten from queue. 
let maybe_nonce = self.transaction_queue.read().next_nonce(&addr); if let Some(nonce) = maybe_nonce { - return future::ok(nonce).boxed() + return Box::new(future::ok(nonce)) } let best_header = self.client.best_block_header(); @@ -295,11 +296,11 @@ impl LightDispatcher { }).expect("no back-references; therefore all back-references valid; qed")); match nonce_future { - Some(x) => + Some(x) => Box::new( x.map(move |acc| acc.map_or(account_start_nonce, |acc| acc.nonce)) .map_err(|_| errors::no_light_peers()) - .boxed(), - None => future::err(errors::network_disabled()).boxed() + ), + None => Box::new(future::err(errors::network_disabled())) } } } @@ -332,29 +333,29 @@ impl Dispatcher for LightDispatcher { // fast path for known gas price. let gas_price = match request_gas_price { - Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(), - None => fetch_gas_price_corpus( + Some(gas_price) => Either::A(future::ok(with_gas_price(gas_price))), + None => Either::B(fetch_gas_price_corpus( self.sync.clone(), self.client.clone(), self.on_demand.clone(), self.cache.clone() ).and_then(|corp| match corp.median() { - Some(median) => future::ok(*median), - None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error. - }).map(with_gas_price).boxed() + Some(median) => Ok(*median), + None => Ok(DEFAULT_GAS_PRICE), // fall back to default on error. + }).map(with_gas_price)) }; match (request_nonce, force_nonce) { - (_, false) | (Some(_), true) => gas_price, + (_, false) | (Some(_), true) => Box::new(gas_price), (None, true) => { let next_nonce = self.next_nonce(from); - gas_price.and_then(move |mut filled| next_nonce + Box::new(gas_price.and_then(move |mut filled| next_nonce .map_err(|_| errors::no_light_peers()) .map(move |nonce| { filled.nonce = Some(nonce); filled }) - ).boxed() + )) }, } } @@ -390,13 +391,12 @@ impl Dispatcher for LightDispatcher { // fast path for pre-filled nonce. 
if let Some(nonce) = filled.nonce { - return future::done(with_nonce(filled, nonce)).boxed() + return Box::new(future::done(with_nonce(filled, nonce))) } - self.next_nonce(address) + Box::new(self.next_nonce(address) .map_err(|_| errors::no_light_peers()) - .and_then(move |nonce| with_nonce(filled, nonce)) - .boxed() + .and_then(move |nonce| with_nonce(filled, nonce))) } fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result { @@ -497,7 +497,7 @@ pub fn execute( match payload { ConfirmationPayload::SendTransaction(request) => { let condition = request.condition.clone().map(Into::into); - dispatcher.sign(accounts, request, pass) + Box::new(dispatcher.sign(accounts, request, pass) .map(move |v| v.map(move |tx| PendingTransaction::new(tx, condition))) .map(WithToken::into_tuple) .map(|(tx, token)| (tx, token, dispatcher)) @@ -506,18 +506,18 @@ pub fn execute( .map(RpcH256::from) .map(ConfirmationResponse::SendTransaction) .map(move |h| WithToken::from((h, tok))) - }).boxed() + })) }, ConfirmationPayload::SignTransaction(request) => { - dispatcher.sign(accounts, request, pass) + Box::new(dispatcher.sign(accounts, request, pass) .map(|result| result .map(RpcRichRawTransaction::from) .map(ConfirmationResponse::SignTransaction) - ).boxed() + )) }, ConfirmationPayload::EthSignMessage(address, data) => { if accounts.is_hardware_address(address) { - return future::err(errors::unsupported("Signing via hardware wallets is not supported.", None)).boxed(); + return Box::new(future::err(errors::unsupported("Signing via hardware wallets is not supported.", None))); } let hash = eth_data_hash(data); @@ -527,11 +527,11 @@ pub fn execute( .map(RpcH520::from) .map(ConfirmationResponse::Signature) ); - future::done(res).boxed() + Box::new(future::done(res)) }, ConfirmationPayload::Decrypt(address, data) => { if accounts.is_hardware_address(address) { - return future::err(errors::unsupported("Decrypting via hardware wallets is not supported.", None)).boxed(); + 
return Box::new(future::err(errors::unsupported("Decrypting via hardware wallets is not supported.", None))); } let res = decrypt(&accounts, address, data, pass) @@ -539,7 +539,7 @@ pub fn execute( .map(RpcBytes) .map(ConfirmationResponse::Decrypt) ); - future::done(res).boxed() + Box::new(future::done(res)) }, } } @@ -602,20 +602,18 @@ pub fn from_rpc(payload: RpcConfirmationPayload, default_account: Address, di { match payload { RpcConfirmationPayload::SendTransaction(request) => { - dispatcher.fill_optional_fields(request.into(), default_account, false) - .map(ConfirmationPayload::SendTransaction) - .boxed() + Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false) + .map(ConfirmationPayload::SendTransaction)) }, RpcConfirmationPayload::SignTransaction(request) => { - dispatcher.fill_optional_fields(request.into(), default_account, false) - .map(ConfirmationPayload::SignTransaction) - .boxed() + Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false) + .map(ConfirmationPayload::SignTransaction)) }, RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => { - future::ok(ConfirmationPayload::Decrypt(address.into(), msg.into())).boxed() + Box::new(future::ok(ConfirmationPayload::Decrypt(address.into(), msg.into()))) }, RpcConfirmationPayload::EthSignMessage(RpcSignRequest { address, data }) => { - future::ok(ConfirmationPayload::EthSignMessage(address.into(), data.into())).boxed() + Box::new(future::ok(ConfirmationPayload::EthSignMessage(address.into(), data.into()))) }, } } diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 23cbbfefc..3960107e7 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -20,7 +20,7 @@ use std::fmt; use rlp::DecoderError; use ethcore::error::{Error as EthcoreError, CallError, TransactionError}; use ethcore::account_provider::{SignError as AccountError}; -use jsonrpc_core::{Error, ErrorCode, Value}; +use 
jsonrpc_core::{futures, Error, ErrorCode, Value}; mod codes { // NOTE [ToDr] Codes from [-32099, -32000] @@ -379,6 +379,6 @@ pub fn deprecated>>(message: T) -> Error { } // on-demand sender cancelled. -pub fn on_demand_cancel(_cancel: ::futures::sync::oneshot::Canceled) -> Error { +pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error { internal("on-demand sender cancelled", "") } diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index b056171fa..bb030b46a 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -25,9 +25,9 @@ use ethcore::ids::BlockId; use ethcore::filter::Filter as EthcoreFilter; use ethcore::transaction::{Action, Transaction as EthTransaction}; -use futures::{future, Future, BoxFuture}; -use futures::future::Either; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::futures::future::Either; use jsonrpc_macros::Trailing; use light::cache::Cache; @@ -113,22 +113,21 @@ impl LightFetch { let mut reqs = Vec::new(); let header_ref = match self.make_header_requests(id, &mut reqs) { Ok(r) => r, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)), }; let maybe_future = self.sync.with_context(move |ctx| { - self.on_demand.request_raw(ctx, reqs) + Box::new(self.on_demand.request_raw(ctx, reqs) .expect("all back-references known to be valid; qed") .map(|res| extract_header(&res, header_ref) .expect("these responses correspond to requests that header_ref belongs to. 
\ therefore it will not fail; qed")) - .map_err(errors::on_demand_cancel) - .boxed() + .map_err(errors::on_demand_cancel)) }); match maybe_future { Some(recv) => recv, - None => future::err(errors::network_disabled()).boxed() + None => Box::new(future::err(errors::network_disabled())) } } @@ -138,25 +137,24 @@ impl LightFetch { let mut reqs = Vec::new(); let header_ref = match self.make_header_requests(id, &mut reqs) { Ok(r) => r, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)), }; reqs.push(request::Account { header: header_ref, address: address }.into()); let maybe_future = self.sync.with_context(move |ctx| { - self.on_demand.request_raw(ctx, reqs) + Box::new(self.on_demand.request_raw(ctx, reqs) .expect("all back-references known to be valid; qed") .map(|mut res| match res.pop() { Some(OnDemandResponse::Account(acc)) => acc, _ => panic!("responses correspond directly with requests in amount and type; qed"), }) - .map_err(errors::on_demand_cancel) - .boxed() + .map_err(errors::on_demand_cancel)) }); match maybe_future { Some(recv) => recv, - None => future::err(errors::network_disabled()).boxed() + None => Box::new(future::err(errors::network_disabled())) } } @@ -193,7 +191,7 @@ impl LightFetch { let header_fut = self.header(id); // fetch missing transaction fields from the network. - nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { + Box::new(nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { let action = req.to.map_or(Action::Create, Action::Call); let value = req.value.unwrap_or_else(U256::zero); let data = req.data.unwrap_or_default(); @@ -222,10 +220,10 @@ impl LightFetch { // TODO: get last-hashes from network. 
let env_info = match client.env_info(id) { Some(env_info) => env_info, - _ => return future::err(errors::unknown_block()).boxed(), + _ => return Either::A(future::err(errors::unknown_block())), }; - execute_tx(gas_known, ExecuteParams { + Either::B(execute_tx(gas_known, ExecuteParams { from: from, tx: tx, hdr: hdr, @@ -233,8 +231,8 @@ impl LightFetch { engine: client.engine().clone(), on_demand: on_demand, sync: sync, - }) - }).boxed() + })) + })) } /// get a block itself. fails on unknown block ID. @@ -242,33 +240,31 @@ impl LightFetch { let mut reqs = Vec::new(); let header_ref = match self.make_header_requests(id, &mut reqs) { Ok(r) => r, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)), }; reqs.push(request::Body(header_ref).into()); let maybe_future = self.sync.with_context(move |ctx| { - self.on_demand.request_raw(ctx, reqs) + Box::new(self.on_demand.request_raw(ctx, reqs) .expect("all back-references known to be valid; qed") .map(|mut res| match res.pop() { Some(OnDemandResponse::Body(b)) => b, _ => panic!("responses correspond directly with requests in amount and type; qed"), }) - .map_err(errors::on_demand_cancel) - .boxed() + .map_err(errors::on_demand_cancel)) }); match maybe_future { Some(recv) => recv, - None => future::err(errors::network_disabled()).boxed() + None => Box::new(future::err(errors::network_disabled())) } } /// get transaction logs pub fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { use std::collections::BTreeMap; - - use futures::stream::{self, Stream}; + use jsonrpc_core::futures::stream::{self, Stream}; const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; @@ -282,9 +278,9 @@ impl LightFetch { }; match (block_number(filter.to_block), block_number(filter.from_block)) { - (Some(to), Some(from)) if to < from => return future::ok(Vec::new()).boxed(), + (Some(to), Some(from)) if to < from => return 
Box::new(future::ok(Vec::new())), (Some(_), Some(_)) => {}, - _ => return future::err(errors::unknown_block()).boxed(), + _ => return Box::new(future::err(errors::unknown_block())), } let maybe_future = self.sync.with_context(move |ctx| { @@ -318,8 +314,8 @@ impl LightFetch { }); match maybe_future { - Some(fut) => fut.boxed(), - None => future::err(errors::network_disabled()).boxed(), + Some(fut) => Box::new(fut), + None => Box::new(future::err(errors::network_disabled())), } } } @@ -339,7 +335,7 @@ struct ExecuteParams { // this will double the gas on each `OutOfGas` error. fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture { if !gas_known { - future::loop_fn(params, |mut params| { + Box::new(future::loop_fn(params, |mut params| { execute_tx(true, params.clone()).and_then(move |res| { match res { Ok(executed) => { @@ -360,7 +356,7 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture Ok(future::Loop::Break(failed)), } }) - }).boxed() + })) } else { trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand", params.tx.gas); @@ -381,8 +377,8 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture fut.boxed(), - None => future::err(errors::network_disabled()).boxed(), + Some(fut) => Box::new(fut), + None => Box::new(future::err(errors::network_disabled())), } } } diff --git a/rpc/src/v1/helpers/oneshot.rs b/rpc/src/v1/helpers/oneshot.rs index c128ccf55..89e90dbd1 100644 --- a/rpc/src/v1/helpers/oneshot.rs +++ b/rpc/src/v1/helpers/oneshot.rs @@ -15,8 +15,8 @@ // along with Parity. If not, see . 
use jsonrpc_core::Error; -use futures::{self, Future}; -use futures::sync::oneshot; +use jsonrpc_core::futures::{self, Future}; +use jsonrpc_core::futures::sync::oneshot; use v1::helpers::errors; pub type Res = Result; diff --git a/rpc/src/v1/helpers/subscription_manager.rs b/rpc/src/v1/helpers/subscription_manager.rs index f529d4810..a6d6f9d51 100644 --- a/rpc/src/v1/helpers/subscription_manager.rs +++ b/rpc/src/v1/helpers/subscription_manager.rs @@ -22,8 +22,8 @@ use parking_lot::Mutex; use jsonrpc_core::futures::future::{self, Either}; use jsonrpc_core::futures::sync::mpsc; -use jsonrpc_core::futures::{Sink, Future, BoxFuture}; -use jsonrpc_core::{self as core, MetaIoHandler}; +use jsonrpc_core::futures::{Sink, Future}; +use jsonrpc_core::{self as core, MetaIoHandler, BoxFuture}; use jsonrpc_pubsub::SubscriptionId; use v1::helpers::Subscribers; @@ -130,7 +130,7 @@ impl> GenericPollManager { } // return a future represeting all the polls - future::join_all(futures).map(|_| ()).boxed() + Box::new(future::join_all(futures).map(|_| ())) } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 5cf8590ce..3afdf2d74 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -20,7 +20,6 @@ use std::thread; use std::time::{Instant, Duration}; use std::sync::Arc; -use futures::{self, future, BoxFuture, Future}; use rlp::{self, UntrustedRlp}; use time::get_time; use bigint::prelude::U256; @@ -41,7 +40,8 @@ use ethcore::transaction::SignedTransaction; use ethcore::snapshot::SnapshotService; use ethsync::{SyncProvider}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::future; use jsonrpc_macros::Trailing; use v1::helpers::{errors, limit_logs, fake_sign}; @@ -318,19 +318,15 @@ impl Eth for EthClient where } } - fn author(&self, meta: Metadata) -> BoxFuture { + fn author(&self, meta: Metadata) -> Result { let dapp = meta.dapp_id(); - let author = move || { - let mut miner = self.miner.author(); - if miner 
== 0.into() { - miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default(); - } + let mut miner = self.miner.author(); + if miner == 0.into() { + miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default(); + } - Ok(RpcH160::from(miner)) - }; - - futures::done(author()).boxed() + Ok(RpcH160::from(miner)) } fn is_mining(&self) -> Result { @@ -345,15 +341,11 @@ impl Eth for EthClient where Ok(RpcU256::from(default_gas_price(&*self.client, &*self.miner))) } - fn accounts(&self, meta: Metadata) -> BoxFuture, Error> { + fn accounts(&self, meta: Metadata) -> Result, Error> { let dapp = meta.dapp_id(); - let accounts = move || { - let accounts = self.dapp_accounts(dapp.into())?; - Ok(accounts.into_iter().map(Into::into).collect()) - }; - - futures::done(accounts()).boxed() + let accounts = self.dapp_accounts(dapp.into())?; + Ok(accounts.into_iter().map(Into::into).collect()) } fn block_number(&self) -> Result { @@ -371,7 +363,7 @@ impl Eth for EthClient where None => Err(errors::state_pruned()), }; - future::done(res).boxed() + Box::new(future::done(res)) } fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing) -> BoxFuture { @@ -386,7 +378,7 @@ impl Eth for EthClient where None => Err(errors::state_pruned()), }; - future::done(res).boxed() + Box::new(future::done(res)) } fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { @@ -411,38 +403,37 @@ impl Eth for EthClient where } }; - future::done(res).boxed() + Box::new(future::done(res)) } fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { - future::ok(self.client.block(BlockId::Hash(hash.into())) - .map(|block| block.transactions_count().into())).boxed() + Box::new(future::ok(self.client.block(BlockId::Hash(hash.into())) + .map(|block| block.transactions_count().into()))) } fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture, Error> { - future::ok(match num { + Box::new(future::ok(match num { 
BlockNumber::Pending => Some( self.miner.status().transactions_in_pending_block.into() ), _ => self.client.block(num.into()) .map(|block| block.transactions_count().into()) - }).boxed() + })) } fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { - future::ok(self.client.block(BlockId::Hash(hash.into())) - .map(|block| block.uncles_count().into())) - .boxed() + Box::new(future::ok(self.client.block(BlockId::Hash(hash.into())) + .map(|block| block.uncles_count().into()))) } fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture, Error> { - future::ok(match num { + Box::new(future::ok(match num { BlockNumber::Pending => Some(0.into()), _ => self.client.block(num.into()) .map(|block| block.uncles_count().into() ), - }).boxed() + })) } fn code_at(&self, address: RpcH160, num: Trailing) -> BoxFuture { @@ -456,15 +447,15 @@ impl Eth for EthClient where None => Err(errors::state_pruned()), }; - future::done(res).boxed() + Box::new(future::done(res)) } fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture, Error> { - future::done(self.block(BlockId::Hash(hash.into()), include_txs)).boxed() + Box::new(future::done(self.block(BlockId::Hash(hash.into()), include_txs))) } fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture, Error> { - future::done(self.block(num.into(), include_txs)).boxed() + Box::new(future::done(self.block(num.into(), include_txs))) } fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { @@ -521,7 +512,7 @@ impl Eth for EthClient where let logs = limit_logs(logs, filter.limit); - future::ok(logs).boxed() + Box::new(future::ok(logs)) } fn work(&self, no_new_work_timeout: Trailing) -> Result { @@ -615,30 +606,24 @@ impl Eth for EthClient where fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = match fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()) { - 
Ok(signed) => signed, - Err(e) => return future::err(e).boxed(), - }; + let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())); let num = num.unwrap_or_default(); let result = self.client.call(&signed, Default::default(), num.into()); - future::done(result + Box::new(future::done(result .map(|b| b.output.into()) .map_err(errors::call) - ).boxed() + )) } fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = match fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()) { - Ok(signed) => signed, - Err(e) => return future::err(e).boxed(), - }; - future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into()) + let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())); + Box::new(future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into()) .map(Into::into) .map_err(errors::call) - ).boxed() + )) } fn compile_lll(&self, _: String) -> Result { diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 1cc880f3c..a7a4a3ed2 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -19,15 +19,15 @@ use std::sync::Arc; use std::collections::HashSet; -use jsonrpc_core::*; use ethcore::miner::MinerService; use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockId}; use bigint::hash::H256; use parking_lot::Mutex; -use futures::{future, Future, BoxFuture}; - +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::futures::future::Either; use v1::traits::EthFilter; use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; use v1::helpers::{PollFilter, PollManager, limit_logs}; @@ -89,7 +89,7 @@ impl Filterable for EthFilterClient where C: BlockChainClient, M: Mi } fn logs(&self, filter: 
EthcoreFilter) -> BoxFuture, Error> { - future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() + Box::new(future::ok(self.client.logs(filter).into_iter().map(Into::into).collect())) } fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { @@ -125,8 +125,8 @@ impl EthFilter for T { fn filter_changes(&self, index: Index) -> BoxFuture { let mut polls = self.polls().lock(); - match polls.poll_mut(&index.value()) { - None => future::ok(FilterChanges::Empty).boxed(), + Box::new(match polls.poll_mut(&index.value()) { + None => Either::A(future::ok(FilterChanges::Empty)), Some(filter) => match *filter { PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. @@ -138,7 +138,7 @@ impl EthFilter for T { *block_number = current_number; - future::ok(FilterChanges::Hashes(hashes)).boxed() + Either::A(future::ok(FilterChanges::Hashes(hashes))) }, PollFilter::PendingTransaction(ref mut previous_hashes) => { // get hashes of pending transactions @@ -162,7 +162,7 @@ impl EthFilter for T { *previous_hashes = current_hashes; // return new hashes - future::ok(FilterChanges::Hashes(new_hashes)).boxed() + Either::A(future::ok(FilterChanges::Hashes(new_hashes))) }, PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { // retrive the current block number @@ -200,14 +200,13 @@ impl EthFilter for T { // retrieve logs in range from_block..min(BlockId::Latest..to_block) let limit = filter.limit; - self.logs(filter) + Either::B(self.logs(filter) .map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs .map(move |logs| limit_logs(logs, limit)) // limit the logs - .map(FilterChanges::Logs) - .boxed() + .map(FilterChanges::Logs)) } } - } + }) } fn filter_logs(&self, index: Index) -> BoxFuture, Error> { @@ -217,7 +216,7 @@ impl EthFilter for T { match polls.poll(&index.value()) { Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref 
filter)) => filter.clone(), // just empty array - _ => return future::ok(Vec::new()).boxed(), + _ => return Box::new(future::ok(Vec::new())), } }; @@ -235,11 +234,10 @@ impl EthFilter for T { // retrieve logs asynchronously, appending pending logs. let limit = filter.limit; let logs = self.logs(filter); - let res = logs + Box::new(logs .map(move |mut logs| { logs.extend(pending); logs }) .map(move |logs| limit_logs(logs, limit)) - .boxed(); - res + ) } fn uninstall_filter(&self, index: Index) -> Result { diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs index a5770f0e5..74b37d7d0 100644 --- a/rpc/src/v1/impls/eth_pubsub.rs +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use std::collections::BTreeMap; -use futures::{self, future, BoxFuture, Future}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{self, Future, IntoFuture}; use jsonrpc_macros::Trailing; use jsonrpc_macros::pubsub::{Sink, Subscriber}; use jsonrpc_pubsub::SubscriptionId; @@ -131,8 +131,10 @@ impl ChainNotificationHandler { } } - fn notify_logs(&self, enacted: &[H256], logs: F) where - F: Fn(EthFilter) -> BoxFuture, Error>, + fn notify_logs(&self, enacted: &[H256], logs: F) where + F: Fn(EthFilter) -> T, + T: IntoFuture, Error = Error>, + T::Future: Send + 'static, { for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { let logs = futures::future::join_all(enacted @@ -141,7 +143,7 @@ impl ChainNotificationHandler { let mut filter = filter.clone(); filter.from_block = BlockId::Hash(*hash); filter.to_block = filter.from_block.clone(); - logs(filter) + logs(filter).into_future() }) .collect::>() ); @@ -224,15 +226,15 @@ impl ChainNotify for ChainNotificationHandler { // Enacted logs self.notify_logs(&enacted, |filter| { - future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() + Ok(self.client.logs(filter).into_iter().map(Into::into).collect()) }); // 
Retracted logs self.notify_logs(&retracted, |filter| { - future::ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| { + Ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| { log.log_type = "removed".into(); log - }).collect()).boxed() + }).collect()) }); } } @@ -270,10 +272,10 @@ impl EthPubSub for EthPubSubClient { let _ = subscriber.reject(error); } - fn unsubscribe(&self, id: SubscriptionId) -> BoxFuture { + fn unsubscribe(&self, id: SubscriptionId) -> Result { let res = self.heads_subscribers.write().remove(&id).is_some(); let res2 = self.logs_subscribers.write().remove(&id).is_some(); - future::ok(res || res2).boxed() + Ok(res || res2) } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index cb4550427..0f7438eb0 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -16,12 +16,11 @@ //! Eth RPC interface for the light client. -// TODO: remove when complete. -#![allow(unused_imports, unused_variables)] - use std::sync::Arc; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::futures::future::Either; use jsonrpc_macros::Trailing; use light::cache::Cache as LightDataCache; @@ -30,25 +29,20 @@ use light::{cht, TransactionQueue}; use light::on_demand::{request, OnDemand}; use ethcore::account_provider::{AccountProvider, DappId}; -use ethcore::basic_account::BasicAccount; use ethcore::encoded; -use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; use ethcore::filter::Filter as EthcoreFilter; -use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; +use ethcore::transaction::SignedTransaction; use ethsync::LightSync; use rlp::UntrustedRlp; use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; use bigint::prelude::U256; use parking_lot::{RwLock, Mutex}; -use futures::{future, Future, BoxFuture, IntoFuture}; -use futures::sync::oneshot; use 
v1::impls::eth_filter::Filterable; -use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; +use v1::helpers::{errors, limit_logs}; use v1::helpers::{PollFilter, PollManager}; -use v1::helpers::block_import::is_major_importing; use v1::helpers::light_fetch::LightFetch; use v1::traits::Eth; use v1::types::{ @@ -58,8 +52,6 @@ use v1::types::{ }; use v1::metadata::Metadata; -use util::Address; - const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; /// Light client `ETH` (and filter) RPC. @@ -162,10 +154,10 @@ impl EthClient { }; // get the block itself. - self.fetcher().block(id).and_then(move |block| { + Box::new(self.fetcher().block(id).and_then(move |block| { // then fetch the total difficulty (this is much easier after getting the block). match client.score(id) { - Some(score) => future::ok(fill_rich(block, Some(score))).boxed(), + Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))), None => { // make a CHT request to fetch the chain score. let req = cht::block_to_cht_number(block.number()) @@ -181,7 +173,7 @@ impl EthClient { .expect("genesis always stored; qed") .difficulty(); - return future::ok(fill_rich(block, Some(score))).boxed() + return Either::A(future::ok(fill_rich(block, Some(score)))) } }; @@ -191,7 +183,7 @@ impl EthClient { // - we get a score, and our hash is canonical. 
let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS)); match maybe_fut { - Some(fut) => fut + Some(fut) => Either::B(fut .map(move |(hash, score)| { let score = if hash == block.hash() { Some(score) @@ -199,13 +191,13 @@ impl EthClient { None }; - fill_rich(block, score) - }).map_err(errors::on_demand_cancel).boxed(), - None => return future::err(errors::network_disabled()).boxed(), + fill_rich(block, score) + }).map_err(errors::on_demand_cancel)), + None => Either::A(future::err(errors::network_disabled())), } } } - }).boxed() + })) } } @@ -235,8 +227,8 @@ impl Eth for EthClient { } } - fn author(&self, _meta: Self::Metadata) -> BoxFuture { - future::ok(Default::default()).boxed() + fn author(&self, _meta: Self::Metadata) -> Result { + Ok(Default::default()) } fn is_mining(&self) -> Result { @@ -254,16 +246,14 @@ impl Eth for EthClient { .unwrap_or_else(Default::default)) } - fn accounts(&self, meta: Metadata) -> BoxFuture, Error> { + fn accounts(&self, meta: Metadata) -> Result, Error> { let dapp: DappId = meta.dapp_id().into(); - let accounts = self.accounts + self.accounts .note_dapp_used(dapp.clone()) .and_then(|_| self.accounts.dapp_addresses(dapp)) .map_err(|e| errors::account("Could not fetch accounts.", e)) - .map(|accs| accs.into_iter().map(Into::::into).collect()); - - future::done(accounts).boxed() + .map(|accs| accs.into_iter().map(Into::::into).collect()) } fn block_number(&self) -> Result { @@ -271,93 +261,93 @@ impl Eth for EthClient { } fn balance(&self, address: RpcH160, num: Trailing) -> BoxFuture { - self.fetcher().account(address.into(), num.unwrap_or_default().into()) - .map(|acc| acc.map_or(0.into(), |a| a.balance).into()).boxed() + Box::new(self.fetcher().account(address.into(), num.unwrap_or_default().into()) + .map(|acc| acc.map_or(0.into(), |a| a.balance).into())) } fn storage_at(&self, _address: RpcH160, _key: RpcU256, _num: Trailing) -> BoxFuture { - 
future::err(errors::unimplemented(None)).boxed() + Box::new(future::err(errors::unimplemented(None))) } fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture, Error> { - self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some).boxed() + Box::new(self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some)) } fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture, Error> { - self.rich_block(num.into(), include_txs).map(Some).boxed() + Box::new(self.rich_block(num.into(), include_txs).map(Some)) } fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { - self.fetcher().account(address.into(), num.unwrap_or_default().into()) - .map(|acc| acc.map_or(0.into(), |a| a.nonce).into()).boxed() + Box::new(self.fetcher().account(address.into(), num.unwrap_or_default().into()) + .map(|acc| acc.map_or(0.into(), |a| a.nonce).into())) } fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { + Box::new(self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { if hdr.transactions_root() == KECCAK_NULL_RLP { - future::ok(Some(U256::from(0).into())).boxed() + Either::A(future::ok(Some(U256::from(0).into()))) } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) - .map(|x| x.map_err(errors::on_demand_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + .map(|x| Either::B(x.map_err(errors::on_demand_cancel))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) } - }).boxed() + })) } fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture, Error> { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - 
self.fetcher().header(num.into()).and_then(move |hdr| { + Box::new(self.fetcher().header(num.into()).and_then(move |hdr| { if hdr.transactions_root() == KECCAK_NULL_RLP { - future::ok(Some(U256::from(0).into())).boxed() + Either::A(future::ok(Some(U256::from(0).into()))) } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) - .map(|x| x.map_err(errors::on_demand_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + .map(|x| Either::B(x.map_err(errors::on_demand_cancel))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) } - }).boxed() + })) } fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { + Box::new(self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { - future::ok(Some(U256::from(0).into())).boxed() + Either::A(future::ok(Some(U256::from(0).into()))) } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) - .map(|x| x.map_err(errors::on_demand_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + .map(|x| Either::B(x.map_err(errors::on_demand_cancel))) + .unwrap_or_else(|| Either::A(future::err(errors::network_disabled()))) } - }).boxed() + })) } fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture, Error> { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); - self.fetcher().header(num.into()).and_then(move |hdr| { + Box::new(self.fetcher().header(num.into()).and_then(move |hdr| { if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { - 
future::ok(Some(U256::from(0).into())).boxed() + Either::B(future::ok(Some(U256::from(0).into()))) } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) .map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) - .map(|x| x.map_err(errors::on_demand_cancel).boxed()) - .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) + .map(|x| Either::A(x.map_err(errors::on_demand_cancel))) + .unwrap_or_else(|| Either::B(future::err(errors::network_disabled()))) } - }).boxed() + })) } - fn code_at(&self, address: RpcH160, num: Trailing) -> BoxFuture { - future::err(errors::unimplemented(None)).boxed() + fn code_at(&self, _address: RpcH160, _num: Trailing) -> BoxFuture { + Box::new(future::err(errors::unimplemented(None))) } fn send_raw_transaction(&self, raw: Bytes) -> Result { @@ -385,45 +375,45 @@ impl Eth for EthClient { } fn call(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing) -> BoxFuture { - self.fetcher().proved_execution(req, num).and_then(|res| { + Box::new(self.fetcher().proved_execution(req, num).and_then(|res| { match res { Ok(exec) => Ok(exec.output.into()), Err(e) => Err(errors::execution(e)), } - }).boxed() + })) } fn estimate_gas(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing) -> BoxFuture { // TODO: binary chop for more accurate estimates. 
- self.fetcher().proved_execution(req, num).and_then(|res| { + Box::new(self.fetcher().proved_execution(req, num).and_then(|res| { match res { Ok(exec) => Ok((exec.refunded + exec.gas_used).into()), Err(e) => Err(errors::execution(e)), } - }).boxed() + })) } - fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { + fn transaction_by_hash(&self, _hash: RpcH256) -> Result, Error> { Err(errors::unimplemented(None)) } - fn transaction_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result, Error> { + fn transaction_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result, Error> { Err(errors::unimplemented(None)) } - fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result, Error> { + fn transaction_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result, Error> { Err(errors::unimplemented(None)) } - fn transaction_receipt(&self, hash: RpcH256) -> Result, Error> { + fn transaction_receipt(&self, _hash: RpcH256) -> Result, Error> { Err(errors::unimplemented(None)) } - fn uncle_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result, Error> { + fn uncle_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result, Error> { Err(errors::unimplemented(None)) } - fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result, Error> { + fn uncle_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result, Error> { Err(errors::unimplemented(None)) } @@ -447,9 +437,8 @@ impl Eth for EthClient { fn logs(&self, filter: Filter) -> BoxFuture, Error> { let limit = filter.limit; - Filterable::logs(self, filter.into()) - .map(move|logs| limit_logs(logs, limit)) - .boxed() + Box::new(Filterable::logs(self, filter.into()) + .map(move|logs| limit_logs(logs, limit))) } fn work(&self, _timeout: Trailing) -> Result { diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index fd617ae85..a896a5f4d 100644 --- 
a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -17,7 +17,6 @@ //! Parity-specific rpc implementation. use std::sync::Arc; use std::collections::{BTreeMap, HashSet}; -use futures::{future, Future, BoxFuture}; use util::misc::version_data; @@ -31,7 +30,8 @@ use node_health::{NodeHealth, Health}; use light::client::LightChainClient; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::Future; use jsonrpc_macros::Trailing; use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::dispatch::LightDispatcher; @@ -140,15 +140,14 @@ impl Parity for ParityClient { Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?) } - fn default_account(&self, meta: Self::Metadata) -> BoxFuture { + fn default_account(&self, meta: Self::Metadata) -> Result { let dapp_id = meta.dapp_id(); - future::ok(self.accounts + Ok(self.accounts .dapp_addresses(dapp_id.into()) .ok() .and_then(|accounts| accounts.get(0).cloned()) .map(|acc| acc.into()) - .unwrap_or_default() - ).boxed() + .unwrap_or_default()) } fn transactions_limit(&self) -> Result { @@ -221,10 +220,9 @@ impl Parity for ParityClient { } fn gas_price_histogram(&self) -> BoxFuture { - self.light_dispatch.gas_price_corpus() + Box::new(self.light_dispatch.gas_price_corpus() .and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data)) - .map(Into::into) - .boxed() + .map(Into::into)) } fn unsigned_transactions_count(&self) -> Result { @@ -316,7 +314,7 @@ impl Parity for ParityClient { } fn next_nonce(&self, address: H160) -> BoxFuture { - self.light_dispatch.next_nonce(address.into()).map(Into::into).boxed() + Box::new(self.light_dispatch.next_nonce(address.into()).map(Into::into)) } fn mode(&self) -> Result { @@ -398,20 +396,19 @@ impl Parity for ParityClient { } }; - self.fetcher().header(number.unwrap_or_default().into()).map(from_encoded).boxed() + 
Box::new(self.fetcher().header(number.unwrap_or_default().into()).map(from_encoded)) } fn ipfs_cid(&self, content: Bytes) -> Result { ipfs::cid(content) } - fn call(&self, _meta: Self::Metadata, _requests: Vec, _block: Trailing) -> BoxFuture, Error> { - future::err(errors::light_unimplemented(None)).boxed() + fn call(&self, _meta: Self::Metadata, _requests: Vec, _block: Trailing) -> Result, Error> { + Err(errors::light_unimplemented(None)) } fn node_health(&self) -> BoxFuture { - self.health.health() - .map_err(|err| errors::internal("Health API failure.", err)) - .boxed() + Box::new(self.health.health() + .map_err(|err| errors::internal("Health API failure.", err))) } } diff --git a/rpc/src/v1/impls/light/parity_set.rs b/rpc/src/v1/impls/light/parity_set.rs index bac6556e3..2887f2836 100644 --- a/rpc/src/v1/impls/light/parity_set.rs +++ b/rpc/src/v1/impls/light/parity_set.rs @@ -22,10 +22,10 @@ use std::sync::Arc; use ethsync::ManageNetwork; use fetch::Fetch; -use futures::{BoxFuture, Future}; use hash::keccak_buffer; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::Future; use v1::helpers::dapps::DappsService; use v1::helpers::errors; use v1::traits::ParitySet; diff --git a/rpc/src/v1/impls/light/trace.rs b/rpc/src/v1/impls/light/trace.rs index 22360183d..742944e36 100644 --- a/rpc/src/v1/impls/light/trace.rs +++ b/rpc/src/v1/impls/light/trace.rs @@ -17,7 +17,6 @@ //! Traces api implementation. 
use jsonrpc_core::Error; -use jsonrpc_core::futures::{future, Future, BoxFuture}; use jsonrpc_macros::Trailing; use v1::Metadata; use v1::traits::Traces; @@ -47,12 +46,12 @@ impl Traces for TracesClient { Err(errors::light_unimplemented(None)) } - fn call(&self, _meta: Self::Metadata, _request: CallRequest, _flags: TraceOptions, _block: Trailing) -> BoxFuture { - future::err(errors::light_unimplemented(None)).boxed() + fn call(&self, _meta: Self::Metadata, _request: CallRequest, _flags: TraceOptions, _block: Trailing) -> Result { + Err(errors::light_unimplemented(None)) } - fn call_many(&self, _meta: Self::Metadata, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing) -> BoxFuture, Error> { - future::err(errors::light_unimplemented(None)).boxed() + fn call_many(&self, _meta: Self::Metadata, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing) -> Result, Error> { + Err(errors::light_unimplemented(None)) } fn raw_transaction(&self, _raw_transaction: Bytes, _flags: TraceOptions, _block: Trailing) -> Result { diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 0f68e654f..a08d60efd 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -18,7 +18,6 @@ use std::sync::Arc; use std::str::FromStr; use std::collections::{BTreeMap, HashSet}; -use futures::{future, Future, BoxFuture}; use util::Address; use util::misc::version_data; @@ -32,12 +31,12 @@ use ethcore::client::{MiningBlockChainClient}; use ethcore::ids::BlockId; use ethcore::miner::MinerService; use ethcore::mode::Mode; -use ethcore::transaction::SignedTransaction; use ethcore_logger::RotatingLogger; use node_health::{NodeHealth, Health}; use updater::{Service as UpdateService}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; use jsonrpc_macros::Trailing; use v1::helpers::{self, errors, fake_sign, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::accounts::unwrap_provider; 
@@ -157,15 +156,14 @@ impl Parity for ParityClient where Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?) } - fn default_account(&self, meta: Self::Metadata) -> BoxFuture { + fn default_account(&self, meta: Self::Metadata) -> Result { let dapp_id = meta.dapp_id(); - future::ok( - try_bf!(self.account_provider()) - .dapp_default_address(dapp_id.into()) - .map(Into::into) - .ok() - .unwrap_or_default() - ).boxed() + + Ok(self.account_provider()? + .dapp_default_address(dapp_id.into()) + .map(Into::into) + .ok() + .unwrap_or_default()) } fn transactions_limit(&self) -> Result { @@ -253,12 +251,12 @@ impl Parity for ParityClient where } fn gas_price_histogram(&self) -> BoxFuture { - future::done(self.client + Box::new(future::done(self.client .gas_price_corpus(100) .histogram(10) .ok_or_else(errors::not_enough_data) .map(Into::into) - ).boxed() + )) } fn unsigned_transactions_count(&self) -> Result { @@ -340,11 +338,11 @@ impl Parity for ParityClient where fn next_nonce(&self, address: H160) -> BoxFuture { let address: Address = address.into(); - future::ok(self.miner.last_nonce(&address) + Box::new(future::ok(self.miner.last_nonce(&address) .map(|n| n + 1.into()) .unwrap_or_else(|| self.client.latest_nonce(&address)) .into() - ).boxed() + )) } fn mode(&self) -> Result { @@ -403,41 +401,37 @@ impl Parity for ParityClient where let id: BlockId = number.unwrap_or_default().into(); let encoded = match self.client.block_header(id.clone()) { Some(encoded) => encoded, - None => return future::err(errors::unknown_block()).boxed(), + None => return Box::new(future::err(errors::unknown_block())), }; - future::ok(RichHeader { + Box::new(future::ok(RichHeader { inner: encoded.into(), extra_info: self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF), - }).boxed() + })) } fn ipfs_cid(&self, content: Bytes) -> Result { ipfs::cid(content) } - fn call(&self, meta: Self::Metadata, requests: Vec, block: Trailing) -> 
BoxFuture, Error> { - let requests: Result, Error> = requests + fn call(&self, meta: Self::Metadata, requests: Vec, block: Trailing) -> Result, Error> { + let requests = requests .into_iter() .map(|request| Ok(( fake_sign::sign_call(&self.client, &self.miner, request.into(), meta.is_dapp())?, Default::default() ))) - .collect(); + .collect::, Error>>()?; let block = block.unwrap_or_default(); - let requests = try_bf!(requests); - let result = self.client.call_many(&requests, block.into()) + self.client.call_many(&requests, block.into()) .map(|res| res.into_iter().map(|res| res.output.into()).collect()) - .map_err(errors::call); - - future::done(result).boxed() + .map_err(errors::call) } fn node_health(&self) -> BoxFuture { - self.health.health() - .map_err(|err| errors::internal("Health API failure.", err)) - .boxed() + Box::new(self.health.health() + .map_err(|err| errors::internal("Health API failure.", err))) } } diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index 259e4802d..d41837186 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -68,7 +68,7 @@ impl ParityAccounts for ParityAccountsClient { for (address, account) in account_iter { match accounts.entry(address) { - /// Insert only if occupied entry isn't already an account with UUID + // Insert only if occupied entry isn't already an account with UUID Entry::Occupied(ref mut occupied) if occupied.get().uuid.is_none() => { occupied.insert(account); }, diff --git a/rpc/src/v1/impls/parity_set.rs b/rpc/src/v1/impls/parity_set.rs index 38890d6d1..5e01455be 100644 --- a/rpc/src/v1/impls/parity_set.rs +++ b/rpc/src/v1/impls/parity_set.rs @@ -23,11 +23,11 @@ use ethcore::client::MiningBlockChainClient; use ethcore::mode::Mode; use ethsync::ManageNetwork; use fetch::{self, Fetch}; -use futures::{BoxFuture, Future}; use hash::keccak_buffer; use updater::{Service as UpdateService}; -use jsonrpc_core::Error; +use 
jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::Future; use v1::helpers::dapps::DappsService; use v1::helpers::errors; use v1::traits::ParitySet; diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 4573b8d1a..cfc7521d9 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -24,8 +24,8 @@ use bigint::prelude::U128; use util::Address; use bytes::ToPretty; -use futures::{future, Future, BoxFuture}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; use v1::helpers::errors; use v1::helpers::dispatch::{Dispatcher, SignWith}; use v1::helpers::accounts::unwrap_provider; @@ -114,10 +114,10 @@ impl Personal for PersonalClient { let default = match default { Ok(default) => default, - Err(e) => return future::err(e).boxed(), + Err(e) => return Box::new(future::err(e)), }; - dispatcher.fill_optional_fields(request.into(), default, false) + Box::new(dispatcher.fill_optional_fields(request.into(), default, false) .and_then(move |filled| { let condition = filled.condition.clone().map(Into::into); dispatcher.sign(accounts, filled, SignWith::Password(password)) @@ -131,8 +131,7 @@ impl Personal for PersonalClient { ::rlp::encode(&*pending_tx).into_vec().pretty(), chain_id); dispatcher.dispatch_transaction(pending_tx).map(Into::into) - }) - .boxed() + })) } fn sign_and_send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture { diff --git a/rpc/src/v1/impls/pubsub.rs b/rpc/src/v1/impls/pubsub.rs index 44b9fcbeb..04234bf2a 100644 --- a/rpc/src/v1/impls/pubsub.rs +++ b/rpc/src/v1/impls/pubsub.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use std::time::Duration; use parking_lot::RwLock; -use futures::{self, BoxFuture, Future, Stream, Sink}; use jsonrpc_core::{self as core, Error, MetaIoHandler}; +use jsonrpc_core::futures::{Future, Stream, Sink}; use jsonrpc_macros::Trailing; use jsonrpc_macros::pubsub::Subscriber; use 
jsonrpc_pubsub::SubscriptionId; @@ -94,8 +94,8 @@ impl> PubSub for PubSubClient { } } - fn parity_unsubscribe(&self, id: SubscriptionId) -> BoxFuture { + fn parity_unsubscribe(&self, id: SubscriptionId) -> Result { let res = self.poll_manager.write().unsubscribe(&id); - futures::future::ok(res).boxed() + Ok(res) } } diff --git a/rpc/src/v1/impls/signer.rs b/rpc/src/v1/impls/signer.rs index a01b66364..c864134fb 100644 --- a/rpc/src/v1/impls/signer.rs +++ b/rpc/src/v1/impls/signer.rs @@ -21,12 +21,13 @@ use std::sync::Arc; use ethcore::account_provider::AccountProvider; use ethcore::transaction::{SignedTransaction, PendingTransaction}; use ethkey; -use futures::{future, BoxFuture, Future, IntoFuture}; use parity_reactor::Remote; use rlp::UntrustedRlp; use parking_lot::Mutex; -use jsonrpc_core::{futures, Error}; +use jsonrpc_core::{Error, BoxFuture}; +use jsonrpc_core::futures::{future, Future, IntoFuture}; +use jsonrpc_core::futures::future::Either; use jsonrpc_pubsub::SubscriptionId; use jsonrpc_macros::pubsub::{Sink, Subscriber}; use v1::helpers::accounts::unwrap_provider; @@ -87,18 +88,11 @@ impl SignerClient { T::Future: Send + 'static { let id = id.into(); + let accounts = try_bf!(self.account_provider()); let dispatcher = self.dispatcher.clone(); + let signer = self.signer.clone(); - let setup = || { - Ok((self.account_provider()?, self.signer.clone())) - }; - - let (accounts, signer) = match setup() { - Ok(x) => x, - Err(e) => return future::err(e).boxed(), - }; - - signer.peek(&id).map(|confirmation| { + Box::new(signer.peek(&id).map(|confirmation| { let mut payload = confirmation.payload.clone(); // Modify payload if let ConfirmationPayload::SendTransaction(ref mut request) = payload { @@ -118,16 +112,16 @@ impl SignerClient { } } let fut = f(dispatcher, accounts, payload); - fut.into_future().then(move |result| { + Either::A(fut.into_future().then(move |result| { // Execute if let Ok(ref response) = result { signer.request_confirmed(id, 
Ok((*response).clone())); } result - }).boxed() + })) }) - .unwrap_or_else(|| future::err(errors::invalid_params("Unknown RequestID", id)).boxed()) + .unwrap_or_else(|| Either::B(future::err(errors::invalid_params("Unknown RequestID", id))))) } fn verify_transaction(bytes: Bytes, request: FilledTransactionRequest, process: F) -> Result where @@ -178,15 +172,15 @@ impl Signer for SignerClient { fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) -> BoxFuture { - self.confirm_internal(id, modification, move |dis, accounts, payload| { + Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| { dispatch::execute(dis, accounts, payload, dispatch::SignWith::Password(pass)) - }).map(|v| v.into_value()).boxed() + }).map(|v| v.into_value())) } fn confirm_request_with_token(&self, id: U256, modification: TransactionModification, token: String) -> BoxFuture { - self.confirm_internal(id, modification, move |dis, accounts, payload| { + Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| { dispatch::execute(dis, accounts, payload, dispatch::SignWith::Token(token)) }).and_then(|v| match v { WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")), @@ -194,7 +188,7 @@ impl Signer for SignerClient { result: response, token: token, }), - }).boxed() + })) } fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result { @@ -253,8 +247,8 @@ impl Signer for SignerClient { self.subscribers.lock().push(sub) } - fn unsubscribe_pending(&self, id: SubscriptionId) -> BoxFuture { + fn unsubscribe_pending(&self, id: SubscriptionId) -> Result { let res = self.subscribers.lock().remove(&id).is_some(); - futures::future::ok(res).boxed() + Ok(res) } } diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index 725580352..0f1217069 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -23,8 +23,9 @@ use parking_lot::Mutex; use 
ethcore::account_provider::AccountProvider; -use futures::{future, BoxFuture, Future}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_core::futures::future::Either; use v1::helpers::{ errors, oneshot, DefaultAccount, @@ -115,23 +116,21 @@ impl SigningQueueClient { let dispatcher = self.dispatcher.clone(); let signer = self.signer.clone(); - dispatch::from_rpc(payload, default_account, &dispatcher) + Box::new(dispatch::from_rpc(payload, default_account, &dispatcher) .and_then(move |payload| { let sender = payload.sender(); if accounts.is_unlocked(sender) { - dispatch::execute(dispatcher, accounts, payload, dispatch::SignWith::Nothing) + Either::A(dispatch::execute(dispatcher, accounts, payload, dispatch::SignWith::Nothing) .map(|v| v.into_value()) - .map(DispatchResult::Value) - .boxed() + .map(DispatchResult::Value)) } else { - future::done( + Either::B(future::done( signer.add_request(payload, origin) .map(DispatchResult::Promise) .map_err(|_| errors::request_rejected_limit()) - ).boxed() + )) } - }) - .boxed() + })) } } @@ -141,12 +140,12 @@ impl ParitySigning for SigningQueueClient { fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { let accounts = try_bf!(self.account_provider()); let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); - self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into).boxed() + Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) } fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture, Error> { let pending = self.pending.clone(); - self.dispatch( + Box::new(self.dispatch( RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), DefaultAccount::Provided(address.into()), meta.origin @@ -160,13 +159,12 @@ impl ParitySigning for 
SigningQueueClient { RpcEither::Either(id.into()) }, - }) - .boxed() + })) } fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture, Error> { let pending = self.pending.clone(); - self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin) + Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin) .map(move |result| match result { DispatchResult::Value(v) => RpcEither::Or(v), DispatchResult::Promise(promise) => { @@ -177,8 +175,7 @@ impl ParitySigning for SigningQueueClient { RpcEither::Either(id.into()) }, - }) - .boxed() + })) } fn check_request(&self, id: RpcU256) -> Result, Error> { @@ -203,7 +200,7 @@ impl ParitySigning for SigningQueueClient { let (ready, p) = oneshot::oneshot(); // when dispatch is complete - res.then(move |res| { + Box::new(res.then(move |res| { // register callback via the oneshot sender. handle_dispatch(res, move |response| { match response { @@ -214,7 +211,7 @@ impl ParitySigning for SigningQueueClient { }); p - }).boxed() + })) } } @@ -230,7 +227,7 @@ impl EthSigning for SigningQueueClient { let (ready, p) = oneshot::oneshot(); - res.then(move |res| { + Box::new(res.then(move |res| { handle_dispatch(res, move |response| { match response { Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)), @@ -240,7 +237,7 @@ impl EthSigning for SigningQueueClient { }); p - }).boxed() + })) } fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { @@ -252,7 +249,7 @@ impl EthSigning for SigningQueueClient { let (ready, p) = oneshot::oneshot(); - res.then(move |res| { + Box::new(res.then(move |res| { handle_dispatch(res, move |response| { match response { Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)), @@ -262,7 +259,7 @@ impl EthSigning for SigningQueueClient { }); p - }).boxed() + })) } fn sign_transaction(&self, meta: Metadata, request: 
RpcTransactionRequest) -> BoxFuture { @@ -274,7 +271,7 @@ impl EthSigning for SigningQueueClient { let (ready, p) = oneshot::oneshot(); - res.then(move |res| { + Box::new(res.then(move |res| { handle_dispatch(res, move |response| { match response { Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)), @@ -284,6 +281,6 @@ impl EthSigning for SigningQueueClient { }); p - }).boxed() + })) } } diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/rpc/src/v1/impls/signing_unsafe.rs index cd027e960..779e9f20e 100644 --- a/rpc/src/v1/impls/signing_unsafe.rs +++ b/rpc/src/v1/impls/signing_unsafe.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use ethcore::account_provider::AccountProvider; -use futures::{future, BoxFuture, Future}; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; +use jsonrpc_core::futures::{future, Future}; use v1::helpers::{errors, DefaultAccount}; use v1::helpers::dispatch::{self, Dispatcher}; use v1::helpers::accounts::unwrap_provider; @@ -64,12 +64,11 @@ impl SigningUnsafeClient { }; let dis = self.dispatcher.clone(); - dispatch::from_rpc(payload, default, &dis) + Box::new(dispatch::from_rpc(payload, default, &dis) .and_then(move |payload| { dispatch::execute(dis, accounts, payload, dispatch::SignWith::Nothing) }) - .map(|v| v.into_value()) - .boxed() + .map(|v| v.into_value())) } } @@ -78,33 +77,30 @@ impl EthSigning for SigningUnsafeClient type Metadata = Metadata; fn sign(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture { - self.handle(RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into()) + Box::new(self.handle(RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into()) .then(|res| match res { Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), Err(e) => Err(e), e => Err(errors::internal("Unexpected result", e)), - }) - .boxed() + })) } fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - 
self.handle(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into()) + Box::new(self.handle(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into()) .then(|res| match res { Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash), Err(e) => Err(e), e => Err(errors::internal("Unexpected result", e)), - }) - .boxed() + })) } fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - self.handle(RpcConfirmationPayload::SignTransaction(request), meta.dapp_id().into()) + Box::new(self.handle(RpcConfirmationPayload::SignTransaction(request), meta.dapp_id().into()) .then(|res| match res { Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx), Err(e) => Err(e), e => Err(errors::internal("Unexpected result", e)), - }) - .boxed() + })) } } @@ -114,27 +110,26 @@ impl ParitySigning for SigningUnsafeClient { fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { let accounts = try_bf!(self.account_provider()); let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); - self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into).boxed() + Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) } fn decrypt_message(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture { - self.handle(RpcConfirmationPayload::Decrypt((address.clone(), data).into()), address.into()) + Box::new(self.handle(RpcConfirmationPayload::Decrypt((address.clone(), data).into()), address.into()) .then(|res| match res { Ok(RpcConfirmationResponse::Decrypt(data)) => Ok(data), Err(e) => Err(e), e => Err(errors::internal("Unexpected result", e)), - }) - .boxed() + })) } fn post_sign(&self, _: Metadata, _: RpcH160, _: RpcBytes) -> BoxFuture, Error> { // We don't support this in non-signer mode. 
- future::err(errors::signer_disabled()).boxed() + Box::new(future::err(errors::signer_disabled())) } fn post_transaction(&self, _: Metadata, _: RpcTransactionRequest) -> BoxFuture, Error> { // We don't support this in non-signer mode. - future::err((errors::signer_disabled())).boxed() + Box::new(future::err((errors::signer_disabled()))) } fn check_request(&self, _: RpcU256) -> Result, Error> { diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index 9ed2c39a4..f9797c1dc 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -24,7 +24,6 @@ use ethcore::transaction::SignedTransaction; use rlp::UntrustedRlp; use jsonrpc_core::Error; -use jsonrpc_core::futures::{self, Future, BoxFuture}; use jsonrpc_macros::Trailing; use v1::Metadata; use v1::traits::Traces; @@ -83,35 +82,31 @@ impl Traces for TracesClient where C: MiningBlockChainClient + 'stat .map(LocalizedTrace::from)) } - fn call(&self, meta: Self::Metadata, request: CallRequest, flags: TraceOptions, block: Trailing) -> BoxFuture { + fn call(&self, meta: Self::Metadata, request: CallRequest, flags: TraceOptions, block: Trailing) -> Result { let block = block.unwrap_or_default(); let request = CallRequest::into(request); - let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())); + let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?; - let res = self.client.call(&signed, to_call_analytics(flags), block.into()) + self.client.call(&signed, to_call_analytics(flags), block.into()) .map(TraceResults::from) - .map_err(errors::call); - - futures::done(res).boxed() + .map_err(errors::call) } - fn call_many(&self, meta: Self::Metadata, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing) -> BoxFuture, Error> { + fn call_many(&self, meta: Self::Metadata, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing) -> Result, Error> { let block = block.unwrap_or_default(); - let requests = 
try_bf!(requests.into_iter() + let requests = requests.into_iter() .map(|(request, flags)| { let request = CallRequest::into(request); let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?; Ok((signed, to_call_analytics(flags))) }) - .collect::, Error>>()); + .collect::, Error>>()?; - let res = self.client.call_many(&requests, block.into()) + self.client.call_many(&requests, block.into()) .map(|results| results.into_iter().map(TraceResults::from).collect()) - .map_err(errors::call); - - futures::done(res).boxed() + .map_err(errors::call) } fn raw_transaction(&self, raw_transaction: Bytes, flags: TraceOptions, block: Trailing) -> Result { diff --git a/rpc/src/v1/impls/web3.rs b/rpc/src/v1/impls/web3.rs index d0be2db81..b636ba608 100644 --- a/rpc/src/v1/impls/web3.rs +++ b/rpc/src/v1/impls/web3.rs @@ -16,7 +16,7 @@ //! Web3 rpc implementation. use hash::keccak; -use jsonrpc_core::*; +use jsonrpc_core::Error; use util::version; use v1::traits::Web3; use v1::types::{H256, Bytes}; diff --git a/rpc/src/v1/informant.rs b/rpc/src/v1/informant.rs index 0cbe7d449..9a9cde383 100644 --- a/rpc/src/v1/informant.rs +++ b/rpc/src/v1/informant.rs @@ -20,7 +20,6 @@ use std::fmt; use std::sync::Arc; use std::sync::atomic::{self, AtomicUsize}; use std::time; -use futures::Future; use futures_cpupool as pool; use jsonrpc_core as rpc; use order_stat; @@ -222,15 +221,23 @@ impl rpc::Middleware for Middleware self.notifier.active(); self.stats.count_request(); + let id = match request { + rpc::Request::Single(rpc::Call::MethodCall(ref call)) => Some(call.id.clone()), + _ => None, + }; let stats = self.stats.clone(); let future = process(request, meta).map(move |res| { - stats.add_roundtrip(Self::as_micro(start.elapsed())); + let time = Self::as_micro(start.elapsed()); + if time > 10_000 { + debug!(target: "rpc", "[{:?}] Took {}ms", id, time / 1_000); + } + stats.add_roundtrip(time); res }); match self.pool { Some(ref pool) => A(pool.spawn(future)), - None 
=> B(future.boxed()), + None => B(Box::new(future)), } } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 8473d0f38..b5e1cfa4d 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -24,7 +24,7 @@ macro_rules! try_bf { ($res: expr) => { match $res { Ok(val) => val, - Err(e) => return ::futures::future::err(e.into()).boxed(), + Err(e) => return Box::new(::jsonrpc_core::futures::future::err(e.into())), } } } diff --git a/rpc/src/v1/tests/helpers/fetch.rs b/rpc/src/v1/tests/helpers/fetch.rs index 236dae91b..fba2aaf5d 100644 --- a/rpc/src/v1/tests/helpers/fetch.rs +++ b/rpc/src/v1/tests/helpers/fetch.rs @@ -17,7 +17,7 @@ //! Test implementation of fetch client. use std::{io, thread}; -use futures::{self, Future}; +use jsonrpc_core::futures::{self, Future}; use fetch::{self, Fetch}; /// Test implementation of fetcher. Will always return the same file. @@ -25,7 +25,7 @@ use fetch::{self, Fetch}; pub struct TestFetch; impl Fetch for TestFetch { - type Result = futures::BoxFuture; + type Result = Box + Send + 'static>; fn new() -> Result where Self: Sized { Ok(TestFetch) @@ -38,6 +38,6 @@ impl Fetch for TestFetch { tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); - rx.map_err(|_| fetch::Error::Aborted).boxed() + Box::new(rx.map_err(|_| fetch::Error::Aborted)) } } diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index 02e6e13a7..fd08f5ee9 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ -20,6 +20,7 @@ use std::time::Duration; use rlp; use jsonrpc_core::{IoHandler, Success}; +use jsonrpc_core::futures::Future; use v1::impls::SigningQueueClient; use v1::metadata::Metadata; use v1::traits::{EthSigning, ParitySigning, Parity}; @@ -36,7 +37,6 @@ use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Transaction, Action, SignedTransaction}; use ethstore::ethkey::{Generator, Random}; -use futures::Future; use 
serde_json; struct SigningTester { diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 9e7ed9715..83543f10b 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -15,11 +15,9 @@ // along with Parity. If not, see . //! Eth rpc interface. -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; use jsonrpc_macros::Trailing; -use futures::BoxFuture; - use v1::types::{RichBlock, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index}; use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; use v1::types::{H64, H160, H256, U256}; @@ -43,7 +41,7 @@ build_rpc_trait! { /// Returns block author. #[rpc(meta, name = "eth_coinbase")] - fn author(&self, Self::Metadata) -> BoxFuture; + fn author(&self, Self::Metadata) -> Result; /// Returns true if client is actively mining new blocks. #[rpc(name = "eth_mining")] @@ -55,50 +53,50 @@ build_rpc_trait! { /// Returns accounts list. #[rpc(meta, name = "eth_accounts")] - fn accounts(&self, Self::Metadata) -> BoxFuture, Error>; + fn accounts(&self, Self::Metadata) -> Result, Error>; /// Returns highest block number. #[rpc(name = "eth_blockNumber")] fn block_number(&self) -> Result; /// Returns balance of the given account. - #[rpc(async, name = "eth_getBalance")] + #[rpc(name = "eth_getBalance")] fn balance(&self, H160, Trailing) -> BoxFuture; /// Returns content of the storage at given address. - #[rpc(async, name = "eth_getStorageAt")] + #[rpc(name = "eth_getStorageAt")] fn storage_at(&self, H160, U256, Trailing) -> BoxFuture; /// Returns block with given hash. - #[rpc(async, name = "eth_getBlockByHash")] + #[rpc(name = "eth_getBlockByHash")] fn block_by_hash(&self, H256, bool) -> BoxFuture, Error>; /// Returns block with given number. 
- #[rpc(async, name = "eth_getBlockByNumber")] + #[rpc(name = "eth_getBlockByNumber")] fn block_by_number(&self, BlockNumber, bool) -> BoxFuture, Error>; /// Returns the number of transactions sent from given address at given time (block number). - #[rpc(async, name = "eth_getTransactionCount")] + #[rpc(name = "eth_getTransactionCount")] fn transaction_count(&self, H160, Trailing) -> BoxFuture; /// Returns the number of transactions in a block with given hash. - #[rpc(async, name = "eth_getBlockTransactionCountByHash")] + #[rpc(name = "eth_getBlockTransactionCountByHash")] fn block_transaction_count_by_hash(&self, H256) -> BoxFuture, Error>; /// Returns the number of transactions in a block with given block number. - #[rpc(async, name = "eth_getBlockTransactionCountByNumber")] + #[rpc(name = "eth_getBlockTransactionCountByNumber")] fn block_transaction_count_by_number(&self, BlockNumber) -> BoxFuture, Error>; /// Returns the number of uncles in a block with given hash. - #[rpc(async, name = "eth_getUncleCountByBlockHash")] + #[rpc(name = "eth_getUncleCountByBlockHash")] fn block_uncles_count_by_hash(&self, H256) -> BoxFuture, Error>; /// Returns the number of uncles in a block with given block number. - #[rpc(async, name = "eth_getUncleCountByBlockNumber")] + #[rpc(name = "eth_getUncleCountByBlockNumber")] fn block_uncles_count_by_number(&self, BlockNumber) -> BoxFuture, Error>; /// Returns the code at given address at given time (block number). - #[rpc(async, name = "eth_getCode")] + #[rpc(name = "eth_getCode")] fn code_at(&self, H160, Trailing) -> BoxFuture; /// Sends signed transaction, returning its hash. @@ -162,7 +160,7 @@ build_rpc_trait! { fn compile_serpent(&self, String) -> Result; /// Returns logs matching given filter object. - #[rpc(async, name = "eth_getLogs")] + #[rpc(name = "eth_getLogs")] fn logs(&self, Filter) -> BoxFuture, Error>; /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. 
@@ -196,11 +194,11 @@ build_rpc_trait! { fn new_pending_transaction_filter(&self) -> Result; /// Returns filter changes since last poll. - #[rpc(async, name = "eth_getFilterChanges")] + #[rpc(name = "eth_getFilterChanges")] fn filter_changes(&self, Index) -> BoxFuture; /// Returns all logs matching given filter (in a range 'from' - 'to'). - #[rpc(async, name = "eth_getFilterLogs")] + #[rpc(name = "eth_getFilterLogs")] fn filter_logs(&self, Index) -> BoxFuture, Error>; /// Uninstalls filter. diff --git a/rpc/src/v1/traits/eth_pubsub.rs b/rpc/src/v1/traits/eth_pubsub.rs index 794d12768..12d637862 100644 --- a/rpc/src/v1/traits/eth_pubsub.rs +++ b/rpc/src/v1/traits/eth_pubsub.rs @@ -20,7 +20,6 @@ use jsonrpc_core::Error; use jsonrpc_macros::Trailing; use jsonrpc_macros::pubsub::Subscriber; use jsonrpc_pubsub::SubscriptionId; -use futures::BoxFuture; use v1::types::pubsub; @@ -36,7 +35,7 @@ build_rpc_trait! { /// Unsubscribe from existing Eth subscription. #[rpc(name = "eth_unsubscribe")] - fn unsubscribe(&self, SubscriptionId) -> BoxFuture; + fn unsubscribe(&self, SubscriptionId) -> Result; } } } diff --git a/rpc/src/v1/traits/eth_signing.rs b/rpc/src/v1/traits/eth_signing.rs index e3b9c9b20..1b84fe3f8 100644 --- a/rpc/src/v1/traits/eth_signing.rs +++ b/rpc/src/v1/traits/eth_signing.rs @@ -16,8 +16,7 @@ //! Eth rpc interface. -use jsonrpc_core::Error; -use futures::BoxFuture; +use jsonrpc_core::{BoxFuture, Error}; use v1::types::{Bytes, H160, H256, H520, TransactionRequest, RichRawTransaction}; diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index e902d6052..7e1fc9a21 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -18,9 +18,8 @@ use std::collections::BTreeMap; -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; use jsonrpc_macros::Trailing; -use futures::BoxFuture; use node_health::Health; use v1::types::{ @@ -51,7 +50,7 @@ build_rpc_trait! { /// Returns default account for dapp. 
#[rpc(meta, name = "parity_defaultAccount")] - fn default_account(&self, Self::Metadata) -> BoxFuture; + fn default_account(&self, Self::Metadata) -> Result; /// Returns current transactions limit. #[rpc(name = "parity_transactionsLimit")] @@ -106,7 +105,7 @@ build_rpc_trait! { fn default_extra_data(&self) -> Result; /// Returns distribution of gas price in latest blocks. - #[rpc(async, name = "parity_gasPriceHistogram")] + #[rpc(name = "parity_gasPriceHistogram")] fn gas_price_histogram(&self) -> BoxFuture; /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) @@ -165,7 +164,7 @@ build_rpc_trait! { fn ws_url(&self) -> Result; /// Returns next nonce for particular sender. Should include all transactions in the queue. - #[rpc(async, name = "parity_nextNonce")] + #[rpc(name = "parity_nextNonce")] fn next_nonce(&self, H160) -> BoxFuture; /// Get the mode. Returns one of: "active", "passive", "dark", "offline". @@ -208,7 +207,7 @@ build_rpc_trait! { /// Get block header. /// Same as `eth_getBlockByNumber` but without uncles and transactions. - #[rpc(async, name = "parity_getBlockHeaderByNumber")] + #[rpc(name = "parity_getBlockHeaderByNumber")] fn block_header(&self, Trailing) -> BoxFuture; /// Get IPFS CIDv0 given protobuf encoded bytes. @@ -217,10 +216,10 @@ build_rpc_trait! { /// Call contract, returning the output data. #[rpc(meta, name = "parity_call")] - fn call(&self, Self::Metadata, Vec, Trailing) -> BoxFuture, Error>; + fn call(&self, Self::Metadata, Vec, Trailing) -> Result, Error>; /// Returns node's health report. - #[rpc(async, name = "parity_nodeHealth")] + #[rpc(name = "parity_nodeHealth")] fn node_health(&self) -> BoxFuture; } } diff --git a/rpc/src/v1/traits/parity_set.rs b/rpc/src/v1/traits/parity_set.rs index 15f35b671..cd964daa3 100644 --- a/rpc/src/v1/traits/parity_set.rs +++ b/rpc/src/v1/traits/parity_set.rs @@ -16,8 +16,7 @@ //! Parity-specific rpc interface for operations altering the settings. 
-use jsonrpc_core::Error; -use futures::BoxFuture; +use jsonrpc_core::{BoxFuture, Error}; use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp}; @@ -93,7 +92,7 @@ build_rpc_trait! { fn set_spec_name(&self, String) -> Result; /// Hash a file content under given URL. - #[rpc(async, name = "parity_hashContent")] + #[rpc(name = "parity_hashContent")] fn hash_content(&self, String) -> BoxFuture; /// Returns true if refresh successful, error if unsuccessful or server is disabled. diff --git a/rpc/src/v1/traits/parity_signing.rs b/rpc/src/v1/traits/parity_signing.rs index 344a477c4..cf20dba36 100644 --- a/rpc/src/v1/traits/parity_signing.rs +++ b/rpc/src/v1/traits/parity_signing.rs @@ -15,8 +15,7 @@ // along with Parity. If not, see . //! ParitySigning rpc interface. -use jsonrpc_core::Error; -use futures::BoxFuture; +use jsonrpc_core::{BoxFuture, Error}; use v1::types::{U256, H160, Bytes, ConfirmationResponse, TransactionRequest, Either}; diff --git a/rpc/src/v1/traits/personal.rs b/rpc/src/v1/traits/personal.rs index f62f5ce7b..a8f575c30 100644 --- a/rpc/src/v1/traits/personal.rs +++ b/rpc/src/v1/traits/personal.rs @@ -15,9 +15,7 @@ // along with Parity. If not, see . //! Personal rpc interface. -use jsonrpc_core::Error; - -use futures::BoxFuture; +use jsonrpc_core::{BoxFuture, Error}; use v1::types::{U128, H160, H256, TransactionRequest}; diff --git a/rpc/src/v1/traits/pubsub.rs b/rpc/src/v1/traits/pubsub.rs index 7957a3875..27d79c911 100644 --- a/rpc/src/v1/traits/pubsub.rs +++ b/rpc/src/v1/traits/pubsub.rs @@ -20,7 +20,6 @@ use jsonrpc_core::{Error, Value, Params}; use jsonrpc_pubsub::SubscriptionId; use jsonrpc_macros::Trailing; use jsonrpc_macros::pubsub::Subscriber; -use futures::BoxFuture; build_rpc_trait! { /// Parity-specific PUB-SUB rpc interface. @@ -34,7 +33,7 @@ build_rpc_trait! { /// Unsubscribe from existing Parity subscription. 
#[rpc(name = "parity_unsubscribe")] - fn parity_unsubscribe(&self, SubscriptionId) -> BoxFuture; + fn parity_unsubscribe(&self, SubscriptionId) -> Result; } } } diff --git a/rpc/src/v1/traits/signer.rs b/rpc/src/v1/traits/signer.rs index f71b5f604..107def9f2 100644 --- a/rpc/src/v1/traits/signer.rs +++ b/rpc/src/v1/traits/signer.rs @@ -15,10 +15,9 @@ // along with Parity. If not, see . //! Parity Signer-related rpc interface. -use jsonrpc_core::Error; +use jsonrpc_core::{BoxFuture, Error}; use jsonrpc_pubsub::SubscriptionId; use jsonrpc_macros::pubsub::Subscriber; -use futures::BoxFuture; use v1::types::{U256, Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken}; @@ -32,11 +31,11 @@ build_rpc_trait! { fn requests_to_confirm(&self) -> Result, Error>; /// Confirm specific request. - #[rpc(async, name = "signer_confirmRequest")] + #[rpc(name = "signer_confirmRequest")] fn confirm_request(&self, U256, TransactionModification, String) -> BoxFuture; /// Confirm specific request with token. - #[rpc(async, name = "signer_confirmRequestWithToken")] + #[rpc(name = "signer_confirmRequestWithToken")] fn confirm_request_with_token(&self, U256, TransactionModification, String) -> BoxFuture; /// Confirm specific request with already signed data. @@ -62,7 +61,7 @@ build_rpc_trait! { /// Unsubscribe from pending requests subscription. #[rpc(name = "signer_unsubscribePending")] - fn unsubscribe_pending(&self, SubscriptionId) -> BoxFuture; + fn unsubscribe_pending(&self, SubscriptionId) -> Result; } } } diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index 33bcb0afd..a6cb43810 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -17,7 +17,6 @@ //! Traces specific rpc interface. 
use jsonrpc_core::Error; -use jsonrpc_core::futures::BoxFuture; use jsonrpc_macros::Trailing; use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256, TraceOptions}; @@ -44,11 +43,11 @@ build_rpc_trait! { /// Executes the given call and returns a number of possible traces for it. #[rpc(meta, name = "trace_call")] - fn call(&self, Self::Metadata, CallRequest, TraceOptions, Trailing) -> BoxFuture; + fn call(&self, Self::Metadata, CallRequest, TraceOptions, Trailing) -> Result; /// Executes all given calls and returns a number of possible traces for each of it. #[rpc(meta, name = "trace_callMany")] - fn call_many(&self, Self::Metadata, Vec<(CallRequest, TraceOptions)>, Trailing) -> BoxFuture, Error>; + fn call_many(&self, Self::Metadata, Vec<(CallRequest, TraceOptions)>, Trailing) -> Result, Error>; /// Executes the given raw transaction and returns a number of possible traces for it. #[rpc(name = "trace_rawTransaction")] diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index 1f1840848..cada20fa1 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -9,15 +9,12 @@ version = "1.4.0" [dependencies] futures = "0.1" log = "0.3.6" -rand = "0.3.14" serde = "1.0" serde_json = "1.0" -tempdir = "0.3.5" url = "1.2.0" matches = "0.1" parking_lot = "0.4" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } parity-rpc = { path = "../rpc" } -ethcore-util = { path = "../util" } hash = { path = "../util/hash" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs index 27b72ab78..fa6144fd2 100644 --- a/rpc_client/src/client.rs +++ b/rpc_client/src/client.rs @@ -1,5 +1,3 @@ -extern 
crate jsonrpc_core; - use std::fmt::{Debug, Formatter, Error as FmtError}; use std::io::{BufReader, BufRead}; use std::sync::Arc; @@ -33,9 +31,9 @@ use serde_json::{ Error as JsonError, }; -use futures::{BoxFuture, Canceled, Complete, Future, oneshot, done}; +use futures::{Canceled, Complete, Future, oneshot, done}; -use jsonrpc_core::{Id, Version, Params, Error as JsonRpcError}; +use jsonrpc_core::{BoxFuture, Id, Version, Params, Error as JsonRpcError}; use jsonrpc_core::request::MethodCall; use jsonrpc_core::response::{Output, Success, Failure}; @@ -212,7 +210,7 @@ impl Rpc { ) -> BoxFuture, Canceled> { let (c, p) = oneshot::>(); match get_authcode(authpath) { - Err(e) => return done(Ok(Err(e))).boxed(), + Err(e) => return Box::new(done(Ok(Err(e)))), Ok(code) => { let url = String::from(url); // The ws::connect takes a FnMut closure, which means c cannot @@ -239,7 +237,7 @@ impl Rpc { _ => () } }); - p.boxed() + Box::new(p) } } } @@ -266,7 +264,7 @@ impl Rpc { .expect("request is serializable"); let _ = self.out.send(serialized); - p.map(|result| { + Box::new(p.map(|result| { match result { Ok(json) => { let t: T = json::from_value(json)?; @@ -274,7 +272,7 @@ impl Rpc { }, Err(err) => Err(err) } - }).boxed() + })) } } diff --git a/rpc_client/src/lib.rs b/rpc_client/src/lib.rs index 7459ca9a4..680c0484b 100644 --- a/rpc_client/src/lib.rs +++ b/rpc_client/src/lib.rs @@ -1,16 +1,13 @@ pub mod client; pub mod signer_client; -extern crate ethcore_util as util; extern crate futures; extern crate jsonrpc_core; extern crate jsonrpc_ws_server as ws; extern crate parity_rpc as rpc; extern crate parking_lot; -extern crate rand; extern crate serde; extern crate serde_json; -extern crate tempdir; extern crate url; extern crate hash; diff --git a/rpc_client/src/signer_client.rs b/rpc_client/src/signer_client.rs index ae051efb6..df3744052 100644 --- a/rpc_client/src/signer_client.rs +++ b/rpc_client/src/signer_client.rs @@ -3,7 +3,8 @@ use rpc::signer::{ConfirmationRequest, 
TransactionModification, U256, Transactio use serde; use serde_json::{Value as JsonValue, to_value}; use std::path::PathBuf; -use futures::{BoxFuture, Canceled}; +use futures::{Canceled}; +use jsonrpc_core::BoxFuture; pub struct SignerRpc { rpc: Rpc, diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index b6107bd8d..fab5c16fd 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -57,7 +57,7 @@ const KEEP_ALIVE_SEND_INTERVAL: u64 = 30; const KEEP_ALIVE_DISCONNECT_INTERVAL: u64 = 60; /// Empty future. -type BoxedEmptyFuture = ::std::boxed::Box + Send>; +type BoxedEmptyFuture = Box + Send>; /// Cluster interface for external clients. pub trait ClusterClient: Send + Sync { diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs index 620003775..a2a282701 100644 --- a/secret_store/src/key_server_cluster/io/deadline.rs +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -19,7 +19,7 @@ use std::time::Duration; use futures::{Future, Select, Poll, Async}; use tokio_core::reactor::{Handle, Timeout}; -type DeadlineBox = ::std::boxed::Box::Item>, Error = ::Error> + Send>; +type DeadlineBox = Box::Item>, Error = ::Error> + Send>; /// Complete a passed future or fail if it is not completed within timeout. 
pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 4a6733e67..47f2c0447 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -11,19 +11,17 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } ethcore-util = { path = "../util" } ethcore-bigint = { path = "../util/bigint" } ethcore-devtools = { path = "../devtools" } -lazy_static = "0.2" env_logger = "0.4" ethcore-ipc = { path = "../ipc/rpc" } -semver = "0.6" ethcore-ipc-nano = { path = "../ipc/nano" } -futures = "0.1" tokio-core = "0.1" +tokio-io = "0.1" parking_lot = "0.4" ethcore-logger = { path = "../logger" } hash = { path = "../util/hash" } diff --git a/stratum/src/lib.rs b/stratum/src/lib.rs index ce01e2c8e..527c43f20 100644 --- a/stratum/src/lib.rs +++ b/stratum/src/lib.rs @@ -23,18 +23,13 @@ extern crate jsonrpc_macros; extern crate ethcore_util as util; extern crate ethcore_bigint as bigint; extern crate ethcore_ipc as ipc; -extern crate semver; -extern crate futures; -extern crate ethcore_logger; extern crate hash; extern crate parking_lot; #[cfg(test)] extern crate tokio_core; -extern crate ethcore_devtools as devtools; +#[cfg(test)] extern crate tokio_io; +#[cfg(test)] extern crate ethcore_logger; #[cfg(test)] extern crate env_logger; -#[cfg(test)] #[macro_use] extern crate lazy_static; - -use futures::{future, 
BoxFuture, Future}; mod traits { //! Stratum ipc interfaces specification @@ -61,7 +56,7 @@ use hash::keccak; use bigint::hash::H256; use parking_lot::{RwLock, RwLockReadGuard}; -type RpcResult = BoxFuture; +type RpcResult = Result; const NOTIFY_COUNTER_INITIAL: u32 = 16; @@ -188,7 +183,7 @@ impl Stratum { } fn submit(&self, params: Params, _meta: SocketMetadata) -> RpcResult { - future::ok(match params { + Ok(match params { Params::Array(vals) => { // first two elements are service messages (worker_id & job_id) match self.dispatcher.submit(vals.iter().skip(2) @@ -208,7 +203,7 @@ impl Stratum { trace!(target: "stratum", "Invalid submit work format {:?}", params); to_value(false) } - }.expect("Only true/false is returned and it's always serializable; qed")).boxed() + }.expect("Only true/false is returned and it's always serializable; qed")) } fn subscribe(&self, _params: Params, meta: SocketMetadata) -> RpcResult { @@ -218,7 +213,7 @@ impl Stratum { self.job_que.write().insert(meta.addr().clone()); trace!(target: "stratum", "Subscription request from {:?}", meta.addr()); - future::ok(match self.dispatcher.initial() { + Ok(match self.dispatcher.initial() { Some(initial) => match jsonrpc_core::Value::from_str(&initial) { Ok(val) => Ok(val), Err(e) => { @@ -227,11 +222,11 @@ impl Stratum { }, }, None => to_value(&[0u8; 0]), - }.expect("Empty slices are serializable; qed")).boxed() + }.expect("Empty slices are serializable; qed")) } fn authorize(&self, params: Params, meta: SocketMetadata) -> RpcResult { - future::result(params.parse::<(String, String)>().map(|(worker_id, secret)|{ + params.parse::<(String, String)>().map(|(worker_id, secret)|{ if let Some(valid_secret) = self.secret { let hash = keccak(secret); if hash != valid_secret { @@ -241,7 +236,7 @@ impl Stratum { trace!(target: "stratum", "New worker #{} registered", worker_id); self.workers.write().insert(meta.addr().clone(), worker_id); to_value(true) - }).map(|v| v.expect("Only true/false is returned and 
it's always serializable; qed"))).boxed() + }).map(|v| v.expect("Only true/false is returned and it's always serializable; qed")) } pub fn subscribers(&self) -> RwLockReadGuard> { @@ -330,8 +325,8 @@ mod tests { use tokio_core::reactor::{Core, Timeout}; use tokio_core::net::TcpStream; - use tokio_core::io; - use futures::{Future, future}; + use tokio_io::io; + use jsonrpc_core::futures::{Future, future}; use ethcore_logger::init_log; diff --git a/updater/src/updater.rs b/updater/src/updater.rs index 04fe572a9..efa34adab 100644 --- a/updater/src/updater.rs +++ b/updater/src/updater.rs @@ -21,7 +21,7 @@ use std::sync::{Arc, Weak}; use ethcore::client::{BlockId, BlockChainClient, ChainNotify}; use ethsync::{SyncProvider}; -use futures::{future, Future, BoxFuture}; +use futures::future; use hash_fetch::{self as fetch, HashFetch}; use hash_fetch::fetch::Client as FetchService; use ipc_common_types::{VersionInfo, ReleaseTrack}; @@ -343,12 +343,12 @@ impl fetch::urlhint::ContractClient for Updater { .ok_or_else(|| "Registrar not available".into()) } - fn call(&self, address: Address, data: Bytes) -> BoxFuture { - future::done( + fn call(&self, address: Address, data: Bytes) -> fetch::urlhint::BoxFuture { + Box::new(future::done( self.client.upgrade() .ok_or_else(|| "Client not available".into()) .and_then(move |c| c.call_contract(BlockId::Latest, address, data)) - ).boxed() + )) } } diff --git a/util/fetch/Cargo.toml b/util/fetch/Cargo.toml index f88bdb551..a1a90b5f7 100644 --- a/util/fetch/Cargo.toml +++ b/util/fetch/Cargo.toml @@ -11,8 +11,7 @@ futures = "0.1" futures-cpupool = "0.1" parking_lot = "0.4" log = "0.3" -reqwest = "0.6" -mime = "0.2" +reqwest = "0.7" [features] default = [] diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index 64193639a..a24cf991b 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -20,11 +20,13 @@ use std::{io, fmt, time}; use std::sync::Arc; use std::sync::atomic::{self, AtomicBool}; -use 
futures::{self, BoxFuture, Future}; +use futures::{self, Future}; use futures_cpupool::{CpuPool, CpuFuture}; -use mime::{self, Mime}; use parking_lot::RwLock; use reqwest; +use reqwest::mime::Mime; + +type BoxFuture = Box + Send>; /// Fetch abort control #[derive(Default, Debug, Clone)] @@ -58,16 +60,19 @@ pub trait Fetch: Clone + Send + Sync + 'static { I: Send + 'static, E: Send + 'static, { - f.boxed() + Box::new(f) } /// Spawn the future in context of this `Fetch` thread pool as "fire and forget", i.e. dropping this future without /// canceling the underlying future. /// Implementation is optional. - fn forget(&self, _: F) where + fn process_and_forget(&self, _: F) where F: Future + Send + 'static, I: Send + 'static, - E: Send + 'static {} + E: Send + 'static, + { + panic!("Attempting to process and forget future on unsupported Fetch."); + } /// Fetch URL and get a future for the result. /// Supports aborting the request in the middle of execution. @@ -109,9 +114,9 @@ impl Clone for Client { impl Client { fn new_client() -> Result, Error> { - let mut client = reqwest::Client::new()?; + let mut client = reqwest::ClientBuilder::new()?; client.redirect(reqwest::RedirectPolicy::limited(5)); - Ok(Arc::new(client)) + Ok(Arc::new(client.build()?)) } fn with_limit(limit: Option) -> Result { @@ -154,10 +159,10 @@ impl Fetch for Client { I: Send + 'static, E: Send + 'static, { - self.pool.spawn(f).boxed() + Box::new(self.pool.spawn(f)) } - fn forget(&self, f: F) where + fn process_and_forget(&self, f: F) where F: Future + Send + 'static, I: Send + 'static, E: Send + 'static, @@ -203,8 +208,8 @@ impl Future for FetchTask { } trace!(target: "fetch", "Starting fetch task: {:?}", self.url); - let result = self.client.get(&self.url) - .header(reqwest::header::UserAgent("Parity Fetch".into())) + let result = self.client.get(&self.url)? 
+ .header(reqwest::header::UserAgent::new("Parity Fetch")) .send()?; Ok(futures::Async::Ready(Response { @@ -289,7 +294,7 @@ impl Response { /// Returns status code of this response. pub fn status(&self) -> reqwest::StatusCode { match self.inner { - ResponseInner::Response(ref r) => *r.status(), + ResponseInner::Response(ref r) => r.status(), ResponseInner::NotFound => reqwest::StatusCode::NotFound, _ => reqwest::StatusCode::Ok, } @@ -303,7 +308,7 @@ impl Response { /// Returns `true` if content type of this response is `text/html` pub fn is_html(&self) -> bool { match self.content_type() { - Some(Mime(mime::TopLevel::Text, mime::SubLevel::Html, _)) => true, + Some(ref mime) if mime.type_() == "text" && mime.subtype() == "html" => true, _ => false, } } diff --git a/util/fetch/src/lib.rs b/util/fetch/src/lib.rs index 21905c532..1f68afe2c 100644 --- a/util/fetch/src/lib.rs +++ b/util/fetch/src/lib.rs @@ -26,10 +26,9 @@ extern crate futures_cpupool; extern crate parking_lot; extern crate reqwest; -pub extern crate mime; pub mod client; pub use self::reqwest::StatusCode; -pub use self::mime::Mime; +pub use self::reqwest::mime::Mime; pub use self::client::{Client, Fetch, Error, Response, Abort}; diff --git a/whisper/Cargo.toml b/whisper/Cargo.toml index b4898f00d..3aa482cc3 100644 --- a/whisper/Cargo.toml +++ b/whisper/Cargo.toml @@ -11,7 +11,6 @@ ethcore-bigint = { path = "../util/bigint" } ethcore-network = { path = "../util/network" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } -futures = "0.1" hex = "0.2" log = "0.3" ordered-float = "0.5" @@ -27,6 +26,6 @@ smallvec = "0.4" time = "0.1" tiny-keccak = "1.3" -jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-core = { git = 
"https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } +jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" } diff --git a/whisper/src/lib.rs b/whisper/src/lib.rs index e865637ed..7e13a2c24 100644 --- a/whisper/src/lib.rs +++ b/whisper/src/lib.rs @@ -22,7 +22,6 @@ extern crate ethcore_bigint as bigint; extern crate ethcore_network as network; extern crate ethcrypto; extern crate ethkey; -extern crate futures; extern crate hex; extern crate ordered_float; extern crate parking_lot; @@ -30,7 +29,6 @@ extern crate rand; extern crate rlp; extern crate ring; extern crate serde; -extern crate serde_json; extern crate slab; extern crate smallvec; extern crate time; @@ -51,6 +49,9 @@ extern crate jsonrpc_macros; #[macro_use] extern crate serde_derive; +#[cfg(test)] +extern crate serde_json; + pub use self::message::Message; pub use self::net::{Network, MessageHandler}; diff --git a/whisper/src/rpc/mod.rs b/whisper/src/rpc/mod.rs index ed47ada15..30fd04db9 100644 --- a/whisper/src/rpc/mod.rs +++ b/whisper/src/rpc/mod.rs @@ -28,7 +28,6 @@ use jsonrpc_pubsub::{Session, PubSubMetadata, SubscriptionId}; use jsonrpc_macros::pubsub; use bigint::hash::H256; -use futures::{future, BoxFuture}; use parking_lot::RwLock; use self::filter::Filter; @@ -140,7 +139,7 @@ build_rpc_trait! { /// Unsubscribe from filter matching given ID. Return /// true on success, error otherwise. 
#[rpc(name = "shh_unsubscribe")] - fn unsubscribe(&self, SubscriptionId) -> BoxFuture; + fn unsubscribe(&self, SubscriptionId) -> Result; } } } @@ -377,7 +376,7 @@ impl WhisperPubSub for } } - fn unsubscribe(&self, id: SubscriptionId) -> BoxFuture { + fn unsubscribe(&self, id: SubscriptionId) -> Result { use std::str::FromStr; let res = match id { @@ -387,6 +386,6 @@ impl WhisperPubSub for SubscriptionId::Number(_) => Err("unrecognized ID"), }; - Box::new(future::done(res.map_err(whisper_error))) + res.map_err(whisper_error) } } From fb6573207609c89711e6aa640fe1e98d5eccc28e Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Thu, 5 Oct 2017 13:03:31 +0200 Subject: [PATCH 10/22] Fix Token Transfer in transaction list (#6589) * Fix Token Transfer in TX LIST * Add TokenReg logs logging --- js/src/redux/providers/tokens.js | 5 + js/src/redux/providers/tokensActions.js | 2 +- js/src/ui/MethodDecoding/methodDecoding.js | 8 +- js/src/ui/MethodDecoding/tokenValue.js | 102 +++++++++++++++++++++ 4 files changed, 113 insertions(+), 4 deletions(-) create mode 100644 js/src/ui/MethodDecoding/tokenValue.js diff --git a/js/src/redux/providers/tokens.js b/js/src/redux/providers/tokens.js index e1c71498b..2ff8b8ce1 100644 --- a/js/src/redux/providers/tokens.js +++ b/js/src/redux/providers/tokens.js @@ -18,8 +18,11 @@ import { updateTokensFilter } from './balancesActions'; import { loadTokens, fetchTokens } from './tokensActions'; import { padRight } from '~/api/util/format'; +import { LOG_KEYS, getLogger } from '~/config'; import Contracts from '~/contracts'; +const log = getLogger(LOG_KEYS.Balances); + let instance = null; export default class Tokens { @@ -155,6 +158,8 @@ export default class Tokens { const { dispatch, getState } = this._store; const tokenIds = logs.map((log) => log.params.id.value.toNumber()); + log.debug('got TokenRegistry logs', logs, tokenIds); + return fetchTokens(tokenIds)(dispatch, getState) .then(() => updateTokensFilter()(dispatch, getState)); } diff 
--git a/js/src/redux/providers/tokensActions.js b/js/src/redux/providers/tokensActions.js index 59245b27a..a9de46667 100644 --- a/js/src/redux/providers/tokensActions.js +++ b/js/src/redux/providers/tokensActions.js @@ -155,7 +155,7 @@ export function loadTokensBasics (_tokenIndexes, options) { }; } -export function fetchTokens (_tokenIndexes, options = {}) { +export function fetchTokens (_tokenIndexes) { const tokenIndexes = uniq(_tokenIndexes || []); const tokenChunks = chunk(tokenIndexes, 64); diff --git a/js/src/ui/MethodDecoding/methodDecoding.js b/js/src/ui/MethodDecoding/methodDecoding.js index eb42a8bff..711e51ad9 100644 --- a/js/src/ui/MethodDecoding/methodDecoding.js +++ b/js/src/ui/MethodDecoding/methodDecoding.js @@ -23,6 +23,7 @@ import { connect } from 'react-redux'; import { TypedInput, InputAddress } from '../Form'; import MethodDecodingStore from './methodDecodingStore'; +import TokenValue from './tokenValue'; import styles from './methodDecoding.css'; @@ -602,9 +603,10 @@ class MethodDecoding extends Component { const { token } = this.props; return ( - - { value.div(token.format).toFormat(5) } { token.tag } - + ); } diff --git a/js/src/ui/MethodDecoding/tokenValue.js b/js/src/ui/MethodDecoding/tokenValue.js new file mode 100644 index 000000000..166773389 --- /dev/null +++ b/js/src/ui/MethodDecoding/tokenValue.js @@ -0,0 +1,102 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import React, { Component, PropTypes } from 'react'; +import { connect } from 'react-redux'; +import { bindActionCreators } from 'redux'; + +import { fetchTokens } from '../../redux/providers/tokensActions'; +import styles from './methodDecoding.css'; + +class TokenValue extends Component { + static propTypes = { + id: PropTypes.string.isRequired, + value: PropTypes.object.isRequired, + + fetchTokens: PropTypes.func, + token: PropTypes.object + }; + + componentWillMount () { + const { token } = this.props; + + if (!token.fetched) { + this.props.fetchTokens([ token.index ]); + } + } + + render () { + const { token, value } = this.props; + + if (!token.format) { + console.warn('token with no format', token); + } + + const format = token.format + ? token.format + : 1; + + const precision = token.format + ? 5 + : 0; + + const tag = token.format + ? token.tag + : 'TOKENS'; + + return ( + + { value.div(format).toFormat(precision) } { tag } + + ); + } +} + +function mapStateToProps (initState, initProps) { + const { id } = initProps; + let token = Object.assign({}, initState.tokens[id]); + + if (token.fetched) { + return () => ({ token }); + } + + let update = true; + + return (state) => { + if (update) { + const { tokens } = state; + const nextToken = tokens[id]; + + if (nextToken.fetched) { + token = Object.assign({}, nextToken); + update = false; + } + } + + return { token }; + }; +} + +function mapDispatchToProps (dispatch) { + return bindActionCreators({ + fetchTokens + }, dispatch); +} + +export default connect( + mapStateToProps, + mapDispatchToProps +)(TokenValue); From c7ea25227a01aa9d22964f84489d7f16a224ce5c Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Thu, 5 Oct 2017 11:46:58 +0000 Subject: [PATCH 11/22] [ci skip] js-precompiled 20171005-114154 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fe53dbe7f..a48aac0e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#6916f0be0012898f29d3e949a41be0daafee08af" +source = "git+https://github.com/paritytech/js-precompiled.git#9f49cabf010c9b3a5215d58d19d3aa5d5b88c6c2" dependencies = [ "parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package-lock.json b/js/package-lock.json index 20f872271..95f77af97 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.25", + "version": "1.8.26", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index 05f9291b9..a323b8900 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.25", + "version": "1.8.26", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 3f520b864b3bac66dd0f473bb88bac95e0755605 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Thu, 5 Oct 2017 12:04:49 +0000 Subject: [PATCH 12/22] [ci skip] js-precompiled 20171005-120001 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a48aac0e0..beab2a66e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#9f49cabf010c9b3a5215d58d19d3aa5d5b88c6c2" +source = "git+https://github.com/paritytech/js-precompiled.git#c1cd82b377a7e21ddec99a19595685a36afbb352" dependencies = [ "parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git 
a/js/package-lock.json b/js/package-lock.json index 95f77af97..6fbeca609 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.26", + "version": "1.8.27", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index a323b8900..4867e8cad 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.26", + "version": "1.8.27", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From bae6a5eeecab7296f852f6c1295da13e62232a29 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 5 Oct 2017 15:34:30 +0200 Subject: [PATCH 13/22] move additional_info to engines, fixes registry on non-ethash chains --- ethcore/src/engines/mod.rs | 8 +++++--- ethcore/src/ethereum/ethash.rs | 6 ++---- ethcore/src/machine.rs | 9 ++++++++- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 5078ebc3a..2e583b179 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -191,9 +191,6 @@ pub trait Engine: Sync + Send { /// Additional engine-specific information for the user/developer concerning `header`. fn extra_info(&self, _header: &M::Header) -> BTreeMap { BTreeMap::new() } - /// Additional information. - fn additional_params(&self) -> HashMap { HashMap::new() } - /// Maximum number of uncles a block is allowed to declare. fn maximum_uncle_count(&self) -> usize { 2 } /// The number of generations back that uncles can be. @@ -396,6 +393,11 @@ pub trait EthEngine: Engine<::machine::EthereumMachine> { fn supports_wasm(&self) -> bool { self.machine().supports_wasm() } + + /// Additional information. + fn additional_params(&self) -> HashMap { + self.machine().additional_params() + } } // convenience wrappers for existing functions. 
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index e7a75af58..e0a85ab9f 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -16,7 +16,7 @@ use std::path::Path; use std::cmp; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::sync::Arc; use hash::{KECCAK_EMPTY_LIST_RLP}; use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor}; @@ -26,7 +26,7 @@ use unexpected::{OutOfBounds, Mismatch}; use block::*; use error::{BlockError, Error}; use header::Header; -use engines::{self, Engine, EthEngine}; +use engines::{self, Engine}; use ethjson; use rlp::{self, UntrustedRlp}; use machine::EthereumMachine; @@ -150,8 +150,6 @@ impl Engine for Arc { // Two fields - nonce and mix. fn seal_fields(&self) -> usize { 2 } - fn additional_params(&self) -> HashMap { hash_map!["registrar".to_owned() => self.params().registrar.hex()] } - /// Additional engine-specific information for the user/developer concerning `header`. fn extra_info(&self, header: &Header) -> BTreeMap { if header.seal().len() == self.seal_fields() { diff --git a/ethcore/src/machine.rs b/ethcore/src/machine.rs index 5b37947db..b1354b17b 100644 --- a/ethcore/src/machine.rs +++ b/ethcore/src/machine.rs @@ -16,7 +16,7 @@ //! Ethereum-like state machine definition. -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::cmp; use std::sync::Arc; @@ -378,6 +378,13 @@ impl EthereumMachine { pub fn supports_wasm(&self) -> bool { self.params().wasm } + + /// Additional params. + pub fn additional_params(&self) -> HashMap { + hash_map![ + "registrar".to_owned() => self.params.registrar.hex() + ] + } } /// Auxiliary data fetcher for an Ethereum machine. 
In Ethereum-like machines From c4c2c77a40f8f416c6f782e41babdfe31e287015 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 5 Oct 2017 17:20:23 +0200 Subject: [PATCH 14/22] Fixed network protocol version negotiation --- util/network/src/session.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/util/network/src/session.rs b/util/network/src/session.rs index dbdf065eb..992081237 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -511,6 +511,7 @@ impl Session { i += 1; } debug!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); + let protocol = ::std::cmp::min(protocol, host.protocol_version); self.info.protocol_version = protocol; self.info.client_version = client_version; self.info.capabilities = caps; From 6431459bcf2fc6f7b47c9d949ce9149dd85decea Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 5 Oct 2017 23:37:41 +0300 Subject: [PATCH 15/22] SecretStore: ShareRemove of 'isolated' nodes (#6630) * SecretStore: ShareRemove from isolated nodes * SecretStore: ServersSetChange && isolated nodes * SecretStore: added threshold check + lost file * SecretStore: remove isolated nodes before other sessions in ServersSetChange * removed obsolete TODO --- .../key_server_cluster/admin_sessions/mod.rs | 10 +- .../servers_set_change_session.rs | 122 ++++++++++++---- .../admin_sessions/share_add_session.rs | 4 +- .../admin_sessions/share_change_session.rs | 58 ++++++-- .../admin_sessions/share_move_session.rs | 4 +- .../admin_sessions/share_remove_session.rs | 130 +++++++++++++++--- .../src/key_server_cluster/cluster.rs | 26 +++- .../key_server_cluster/cluster_sessions.rs | 9 +- .../src/key_server_cluster/message.rs | 2 + 9 files changed, 291 insertions(+), 74 deletions(-) diff --git a/secret_store/src/key_server_cluster/admin_sessions/mod.rs b/secret_store/src/key_server_cluster/admin_sessions/mod.rs index 68fddf6fa..205de06c9 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/mod.rs +++ 
b/secret_store/src/key_server_cluster/admin_sessions/mod.rs @@ -22,7 +22,7 @@ pub mod share_remove_session; mod sessions_queue; -use key_server_cluster::{SessionId, NodeId, SessionMeta}; +use key_server_cluster::{SessionId, NodeId, SessionMeta, Error}; /// Share change session metadata. #[derive(Debug, Clone)] @@ -37,12 +37,12 @@ pub struct ShareChangeSessionMeta { impl ShareChangeSessionMeta { /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`. - pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> SessionMeta { - SessionMeta { + pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> Result { + Ok(SessionMeta { id: self.id, master_node_id: self.master_node_id, self_node_id: self.self_node_id, - threshold: all_nodes_set_len - 1, - } + threshold: all_nodes_set_len.checked_sub(1).ok_or(Error::ConsensusUnreachable)?, + }) } } diff --git a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs index 651fd8a4c..2a82fb388 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs @@ -69,7 +69,7 @@ pub struct SessionImpl { } /// Session state. -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] enum SessionState { /// Establishing consensus. 
EstablishingConsensus, @@ -205,7 +205,7 @@ impl SessionImpl { } let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_master(self.core.admin_public.clone(), self.core.all_nodes_set.clone(), self.core.all_nodes_set.clone(), @@ -277,7 +277,7 @@ impl SessionImpl { match &message.message { &ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone(), self.core.all_nodes_set.clone(), ), @@ -395,6 +395,7 @@ impl SessionImpl { true => return Err(Error::InvalidMessage), false => { let master_plan = ShareChangeSessionPlan { + isolated_nodes: message.isolated_nodes.iter().cloned().map(Into::into).collect(), nodes_to_add: message.shares_to_add.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), nodes_to_move: message.shares_to_move.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(), nodes_to_remove: message.shares_to_remove.iter().cloned().map(Into::into).collect(), @@ -409,8 +410,9 @@ impl SessionImpl { if let Ok(key_share) = self.core.key_storage.get(&key_id) { let new_nodes_set = data.new_nodes_set.as_ref() .expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed"); - let local_plan = prepare_share_change_session_plan(&key_share.id_numbers.keys().cloned().collect(), new_nodes_set)?; - if local_plan.nodes_to_add.keys().any(|n| !local_plan.nodes_to_add.contains_key(n)) + let local_plan = 
prepare_share_change_session_plan(&self.core.all_nodes_set, &key_share.id_numbers.keys().cloned().collect(), new_nodes_set)?; + if local_plan.isolated_nodes != master_plan.isolated_nodes + || local_plan.nodes_to_add.keys().any(|n| !local_plan.nodes_to_add.contains_key(n)) || local_plan.nodes_to_add.keys().any(|n| !master_plan.nodes_to_add.contains_key(n)) || local_plan.nodes_to_move != master_plan.nodes_to_move || local_plan.nodes_to_remove != master_plan.nodes_to_remove { @@ -418,10 +420,13 @@ impl SessionImpl { } } - data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(&self.core, key_id, + let session = Self::create_share_change_session(&self.core, key_id, message.master_node_id.clone().into(), message.old_shares_set.iter().cloned().map(Into::into).collect(), - master_plan)?); + master_plan)?; + if !session.is_finished() { + data.active_key_sessions.insert(key_id.clone(), session); + } }, }; @@ -475,8 +480,17 @@ impl SessionImpl { }))); } - let key_session = data.active_key_sessions.get_mut(&key_id).ok_or(Error::InvalidMessage)?; - key_session.initialize() + // initialize share change session + { + let key_session = data.active_key_sessions.get_mut(&key_id).ok_or(Error::InvalidMessage)?; + key_session.initialize()?; + if !key_session.is_finished() { + return Ok(()); + } + } + + // complete key session + Self::complete_key_session(&self.core, &mut *data, true, key_id) } /// When sessions execution is delegated to this node. 
@@ -608,19 +622,7 @@ impl SessionImpl { }; if is_finished { - data.active_key_sessions.remove(&session_id); - let is_general_master = self.core.meta.self_node_id == self.core.meta.master_node_id; - if is_master && !is_general_master { - Self::return_delegated_session(&self.core, &session_id)?; - } - if is_general_master { - Self::disseminate_session_initialization_requests(&self.core, &mut *data)?; - } - - if data.result.is_some() && data.active_key_sessions.len() == 0 { - data.state = SessionState::Finished; - self.core.completed.notify_all(); - } + Self::complete_key_session(&self.core, &mut *data, is_master, session_id)?; } Ok(()) @@ -639,6 +641,7 @@ impl SessionImpl { cluster: core.cluster.clone(), key_storage: core.key_storage.clone(), old_nodes_set: old_nodes_set, + cluster_nodes_set: core.all_nodes_set.clone(), plan: session_plan, }) } @@ -659,7 +662,7 @@ impl SessionImpl { // prepare session change plan && check if something needs to be changed let old_nodes_set = queued_session.nodes(); - let session_plan = prepare_share_change_session_plan(&old_nodes_set, new_nodes_set)?; + let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, &old_nodes_set, new_nodes_set)?; if session_plan.is_empty() { continue; } @@ -676,6 +679,7 @@ impl SessionImpl { let mut confirmations: BTreeSet<_> = old_nodes_set.iter().cloned() .chain(session_plan.nodes_to_add.keys().cloned()) .chain(session_plan.nodes_to_move.keys().cloned()) + .filter(|n| core.all_nodes_set.contains(n)) .collect(); let need_create_session = confirmations.remove(&core.meta.self_node_id); let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession { @@ -684,6 +688,7 @@ impl SessionImpl { key_id: key_id.clone().into(), master_node_id: session_master.clone().into(), old_shares_set: old_nodes_set.iter().cloned().map(Into::into).collect(), + isolated_nodes: 
session_plan.isolated_nodes.iter().cloned().map(Into::into).collect(), shares_to_add: session_plan.nodes_to_add.iter() .map(|(n, nid)| (n.clone().into(), nid.clone().into())) .collect(), @@ -747,6 +752,25 @@ impl SessionImpl { }))) } + /// Complete key session. + fn complete_key_session(core: &SessionCore, data: &mut SessionData, is_master: bool, session_id: SessionId) -> Result<(), Error> { + data.active_key_sessions.remove(&session_id); + let is_general_master = core.meta.self_node_id == core.meta.master_node_id; + if is_master && !is_general_master { + Self::return_delegated_session(core, &session_id)?; + } + if is_general_master { + Self::disseminate_session_initialization_requests(core, data)?; + } + + if data.result.is_some() && data.active_key_sessions.len() == 0 { + data.state = SessionState::Finished; + core.completed.notify_all(); + } + + Ok(()) + } + /// Complete servers set change session. fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); @@ -916,7 +940,7 @@ pub mod tests { } impl MessageLoop { - pub fn new(gml: GenerationMessageLoop, master_node_id: NodeId, new_nodes_ids: BTreeSet, removed_nodes_ids: BTreeSet) -> Self { + pub fn new(gml: GenerationMessageLoop, master_node_id: NodeId, new_nodes_ids: BTreeSet, removed_nodes_ids: BTreeSet, isolated_nodes_ids: BTreeSet) -> Self { // generate admin key pair let admin_key_pair = Random.generate().unwrap(); let admin_public = admin_key_pair.public().clone(); @@ -928,12 +952,20 @@ pub mod tests { .iter()).unwrap(); let original_key_pair = KeyPair::from_secret(original_secret).unwrap(); - let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().collect(); + // all active nodes set + let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys() + .filter(|n| !isolated_nodes_ids.contains(n)) + .cloned() + .collect(); + // new nodes set includes all old nodes, except nodes being removed + all nodes being added 
let new_nodes_set: BTreeSet = all_nodes_set.iter().cloned() .chain(new_nodes_ids.iter().cloned()) .filter(|n| !removed_nodes_ids.contains(n)) .collect(); all_nodes_set.extend(new_nodes_ids.iter().cloned()); + for isolated_node_id in &isolated_nodes_ids { + all_nodes_set.remove(isolated_node_id); + } let meta = ShareChangeSessionMeta { self_node_id: master_node_id.clone(), @@ -958,6 +990,12 @@ pub mod tests { }); let nodes: BTreeMap<_, _> = old_nodes.chain(new_nodes).map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); + for node in nodes.values() { + for isolated_node_id in &isolated_nodes_ids { + node.cluster.remove_node(isolated_node_id); + } + } + let all_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&all_nodes_set)).unwrap(); let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); @@ -1018,7 +1056,7 @@ pub mod tests { // insert 1 node so that it becames 2-of-4 session let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new()); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new(), BTreeSet::new()); ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); ml.run(); @@ -1041,7 +1079,7 @@ pub mod tests { // 3) delegated session is returned back to added node let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); let master_node_id = nodes_to_add.iter().cloned().nth(0).unwrap(); - let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new()); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add, BTreeSet::new(), BTreeSet::new()); ml.nodes[&master_node_id].session.initialize(ml.nodes.keys().cloned().collect(), ml.all_set_signature.clone(), 
ml.new_set_signature.clone()).unwrap(); ml.run(); @@ -1058,7 +1096,7 @@ pub mod tests { // remove 1 node && insert 1 node so that one share is moved let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); let nodes_to_add: BTreeSet<_> = (0..1).map(|_| Random.generate().unwrap().public().clone()).collect(); - let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add.clone(), nodes_to_remove.clone()); + let mut ml = MessageLoop::new(gml, master_node_id, nodes_to_add.clone(), nodes_to_remove.clone(), BTreeSet::new()); let new_nodes_set = ml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(n)).collect(); ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); ml.run(); @@ -1085,7 +1123,7 @@ pub mod tests { // remove 1 node so that session becames 2-of-2 let nodes_to_remove: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); let new_nodes_set: BTreeSet<_> = gml.nodes.keys().cloned().filter(|n| !nodes_to_remove.contains(&n)).collect(); - let mut ml = MessageLoop::new(gml, master_node_id, BTreeSet::new(), nodes_to_remove.clone()); + let mut ml = MessageLoop::new(gml, master_node_id, BTreeSet::new(), nodes_to_remove.clone(), BTreeSet::new()); ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); ml.run(); @@ -1101,4 +1139,30 @@ pub mod tests { // check that all sessions have finished assert!(ml.nodes.values().all(|n| n.session.is_finished())); } + + #[test] + fn isolated_node_removed_using_servers_set_change() { + // initial 2-of-3 session + let gml = generate_key(1, generate_nodes_ids(3)); + let master_node_id = gml.nodes.keys().cloned().nth(0).unwrap(); + + // remove 1 node so that session becames 2-of-2 + let nodes_to_isolate: BTreeSet<_> = gml.nodes.keys().cloned().skip(1).take(1).collect(); + let new_nodes_set: BTreeSet<_> = 
gml.nodes.keys().cloned().filter(|n| !nodes_to_isolate.contains(&n)).collect(); + let mut ml = MessageLoop::new(gml, master_node_id, BTreeSet::new(), BTreeSet::new(), nodes_to_isolate.clone()); + ml.nodes[&master_node_id].session.initialize(new_nodes_set, ml.all_set_signature.clone(), ml.new_set_signature.clone()).unwrap(); + ml.run(); + + // try to recover secret for every possible combination of nodes && check that secret is the same + check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter() + .filter(|&(k, _)| !nodes_to_isolate.contains(k)) + .map(|(k, v)| (k.clone(), v.key_storage.clone())) + .collect()); + + // check that all isolated nodes still OWN key share + assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_ok())); + + // check that all sessions have finished + assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_isolate.contains(k)).all(|(_, v)| v.session.is_finished())); + } } diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs index bee88891e..ead4dc3dd 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_add_session.rs @@ -273,7 +273,7 @@ impl SessionImpl where T: SessionTransport { .map(|(k, v)| (k.clone(), v.clone().expect("new_nodes_map is updated above so that every value is_some; qed"))) .collect()); let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(new_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(new_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, old_nodes_set.clone(), old_nodes_set.clone(), @@ -329,7 +329,7 @@ impl SessionImpl where T: SessionTransport { .map(|ks| ks.id_numbers.keys().cloned().collect()) 
.unwrap_or_else(|| message.old_nodes_set.clone().into_iter().map(Into::into).collect()); data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(message.new_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(message.new_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), consensus_transport: self.core.transport.clone(), })?); diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs index 5cf9da377..aa13c0142 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_change_session.rs @@ -35,9 +35,10 @@ use key_server_cluster::message::{ShareAddMessage, ShareMoveMessage, ShareRemove use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// Single session meta-change session. Brief overview: -/// 1) new shares are added to the session -/// 2) shares are moved between nodes -/// 3) shares are removed from nodes +/// 1) nodes that have been already removed from cluster (isolated nodes) are removed from session +/// 2) new shares are added to the session +/// 3) shares are moved between nodes +/// 4) shares are removed from nodes pub struct ShareChangeSession { /// Servers set change session id. session_id: SessionId, @@ -51,6 +52,8 @@ pub struct ShareChangeSession { key_storage: Arc, /// Old nodes set. old_nodes_set: BTreeSet, + /// All cluster nodes set. + cluster_nodes_set: BTreeSet, /// Nodes to add shares for. nodes_to_add: Option>, /// Nodes to move shares from/to. @@ -68,7 +71,10 @@ pub struct ShareChangeSession { } /// Share change session plan. +#[derive(Debug)] pub struct ShareChangeSessionPlan { + /// Nodes that are isolated and need to be removed before share addition. 
+ pub isolated_nodes: BTreeSet, /// Nodes to add shares for. pub nodes_to_add: BTreeMap, /// Nodes to move shares from/to (keys = target nodes, values = source nodes). @@ -89,6 +95,8 @@ pub struct ShareChangeSessionParams { pub cluster: Arc, /// Keys storage. pub key_storage: Arc, + /// All cluster nodes set. + pub cluster_nodes_set: BTreeSet, /// Old nodes set. pub old_nodes_set: BTreeSet, /// Session plan. @@ -110,11 +118,19 @@ impl ShareChangeSession { /// Create new share change session. pub fn new(params: ShareChangeSessionParams) -> Result { // we can't create sessions right now, because key share is read when session is created, but it can change in previous session + let isolated_nodes = if !params.plan.isolated_nodes.is_empty() { Some(params.plan.isolated_nodes) } else { None }; let nodes_to_add = if !params.plan.nodes_to_add.is_empty() { Some(params.plan.nodes_to_add) } else { None }; let nodes_to_remove = if !params.plan.nodes_to_remove.is_empty() { Some(params.plan.nodes_to_remove) } else { None }; let nodes_to_move = if !params.plan.nodes_to_move.is_empty() { Some(params.plan.nodes_to_move) } else { None }; - debug_assert!(nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some()); + debug_assert!(isolated_nodes.is_some() || nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some()); + // if it is degenerated session (only isolated nodes are removed && no network communication required) + // => remove isolated nodes && finish session + if let Some(isolated_nodes) = isolated_nodes { + Self::remove_isolated_nodes(¶ms.meta, ¶ms.key_storage, isolated_nodes)?; + } + + let is_finished = nodes_to_add.is_none() && nodes_to_remove.is_none() && nodes_to_move.is_none(); Ok(ShareChangeSession { session_id: params.session_id, nonce: params.nonce, @@ -122,13 +138,14 @@ impl ShareChangeSession { cluster: params.cluster, key_storage: params.key_storage, old_nodes_set: params.old_nodes_set, + cluster_nodes_set: 
params.cluster_nodes_set, nodes_to_add: nodes_to_add, nodes_to_remove: nodes_to_remove, nodes_to_move: nodes_to_move, share_add_session: None, share_move_session: None, share_remove_session: None, - is_finished: false, + is_finished: is_finished, }) } @@ -246,6 +263,7 @@ impl ShareChangeSession { let share_remove_session = ShareRemoveSessionImpl::new(ShareRemoveSessionParams { meta: self.meta.clone(), nonce: self.nonce, + cluster_nodes_set: self.cluster_nodes_set.clone(), transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), key_storage: self.key_storage.clone(), admin_public: None, @@ -289,6 +307,18 @@ impl ShareChangeSession { Ok(()) } + + /// Remove isolated nodes from key share. + fn remove_isolated_nodes(meta: &ShareChangeSessionMeta, key_storage: &Arc, isolated_nodes: BTreeSet) -> Result<(), Error> { + let mut key_share = key_storage.get(&meta.id).map_err(|e| Error::KeyStorage(e.into()))?; + for isolated_node in &isolated_nodes { + key_share.id_numbers.remove(isolated_node); + } + if key_share.id_numbers.len() < key_share.threshold + 1 { + return Err(Error::InvalidNodesConfiguration); + } + key_storage.update(meta.id.clone(), key_share).map_err(|e| Error::KeyStorage(e.into())) + } } impl ShareChangeTransport { @@ -353,10 +383,20 @@ impl ShareRemoveSessionTransport for ShareChangeTransport { } /// Prepare share change plan for moving from old `session_nodes` to `new_nodes_set`. 
-pub fn prepare_share_change_session_plan(session_nodes: &BTreeSet, new_nodes_set: &BTreeSet) -> Result { +pub fn prepare_share_change_session_plan(cluster_nodes_set: &BTreeSet, session_nodes: &BTreeSet, new_nodes_set: &BTreeSet) -> Result { let mut nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&session_nodes).cloned().collect(); let mut nodes_to_move = BTreeMap::new(); - let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set).cloned().collect(); + // isolated nodes are the nodes that are not currently in cluster + that are in new nodes set + let isolated_nodes: BTreeSet<_> = session_nodes.difference(&cluster_nodes_set) + .filter(|n| !new_nodes_set.contains(n)) + .cloned() + .collect(); + // removed nodes are all old session nodes, except nodes that are in new set + except isolated nodes + let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set) + .filter(|n| !isolated_nodes.contains(n)) + .cloned() + .collect(); + while !nodes_to_remove.is_empty() && !nodes_to_add.is_empty() { let source_node = nodes_to_remove.iter().cloned().nth(0).expect("nodes_to_remove.is_empty is checked in while condition; qed"); let target_node = nodes_to_add.iter().cloned().nth(0).expect("nodes_to_add.is_empty is checked in while condition; qed"); @@ -366,6 +406,7 @@ pub fn prepare_share_change_session_plan(session_nodes: &BTreeSet, new_n } Ok(ShareChangeSessionPlan { + isolated_nodes: isolated_nodes, nodes_to_add: nodes_to_add.into_iter() .map(|n| math::generate_random_scalar().map(|s| (n, s))) .collect::, _>>()?, @@ -377,7 +418,8 @@ pub fn prepare_share_change_session_plan(session_nodes: &BTreeSet, new_n impl ShareChangeSessionPlan { /// Is empty (nothing-to-do) plan? 
pub fn is_empty(&self) -> bool { - self.nodes_to_add.is_empty() + self.isolated_nodes.is_empty() + && self.nodes_to_add.is_empty() && self.nodes_to_move.is_empty() && self.nodes_to_remove.is_empty() } diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs index 0dc3cce84..a19a9eee0 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs +++ b/secret_store/src/key_server_cluster/admin_sessions/share_move_session.rs @@ -206,7 +206,7 @@ impl SessionImpl where T: SessionTransport { consensus_transport.set_shares_to_move_reversed(shares_to_move_reversed.clone()); let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public, old_nodes_set.clone(), old_nodes_set.clone(), @@ -263,7 +263,7 @@ impl SessionImpl where T: SessionTransport { .unwrap_or_else(|| message.old_nodes_set.clone().into_iter().map(Into::into).collect()); let all_nodes_set_len = message.new_nodes_set.keys().chain(message.old_nodes_set.iter()).collect::>().len(); data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(all_nodes_set_len), + meta: self.core.meta.clone().into_consensus_meta(all_nodes_set_len)?, consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), consensus_transport: self.core.transport.clone(), })?); diff --git a/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs b/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs index 83824fe93..8829af51f 100644 --- a/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs +++ 
b/secret_store/src/key_server_cluster/admin_sessions/share_remove_session.rs @@ -58,6 +58,8 @@ struct SessionCore { pub nonce: u64, /// Original key share. pub key_share: DocumentKeyShare, + /// All known cluster nodes. + pub cluster_nodes_set: BTreeSet, /// Session transport to communicate to other cluster nodes. pub transport: T, /// Key storage. @@ -91,6 +93,8 @@ pub struct SessionParams { pub meta: ShareChangeSessionMeta, /// Session nonce. pub nonce: u64, + /// All known cluster nodes. + pub cluster_nodes_set: BTreeSet, /// Session transport to communicate to other cluster nodes. pub transport: T, /// Key storage. @@ -129,6 +133,7 @@ impl SessionImpl where T: SessionTransport { meta: params.meta.clone(), nonce: params.nonce, key_share: params.key_storage.get(¶ms.meta.id).map_err(|e| Error::KeyStorage(e.into()))?, + cluster_nodes_set: params.cluster_nodes_set, transport: params.transport, key_storage: params.key_storage, admin_public: params.admin_public, @@ -155,8 +160,19 @@ impl SessionImpl where T: SessionTransport { check_shares_to_remove(&self.core, &shares_to_remove)?; - data.remove_confirmations_to_receive = Some(shares_to_remove.clone()); + let remove_confirmations_to_receive: BTreeSet = shares_to_remove.iter() + .filter(|n| self.core.cluster_nodes_set.contains(n)) + .cloned() + .collect(); + let need_wait_for_confirmations = !remove_confirmations_to_receive.is_empty(); data.shares_to_remove = Some(shares_to_remove); + data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive); + + // on slave nodes it can happen that all nodes being removed are isolated + // => there's no need to wait for confirmations + if !need_wait_for_confirmations { + Self::complete_session(&self.core, &mut *data)?; + } Ok(()) } @@ -167,6 +183,10 @@ impl SessionImpl where T: SessionTransport { let mut data = self.data.lock(); // check state + if data.state == SessionState::Finished { + // probably there are isolated nodes && we only remove isolated nodes from 
session + return Ok(()); + } if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { return Err(Error::InvalidStateForRequest); } @@ -174,28 +194,33 @@ impl SessionImpl where T: SessionTransport { // if consensus is not yet established => start consensus session let is_consensus_pre_established = data.shares_to_remove.is_some(); if !is_consensus_pre_established { - // TODO: even if node was lost, it is still required for ShareRemove session to complete. - // It is wrong - if node is not in all_nodes_set, it must be excluded from consensus. let shares_to_remove = shares_to_remove.ok_or(Error::InvalidMessage)?; check_shares_to_remove(&self.core, &shares_to_remove)?; let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; - let all_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect(); - let new_nodes_set: BTreeSet<_> = all_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect(); + let old_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect(); + let new_nodes_set: BTreeSet<_> = old_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect(); + let mut active_nodes_set = old_nodes_set.clone(); let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; + // if some session nodes were removed from cluster (we treat this as a failure, or as a 'improper' removal) + // => do not require these nodes to be connected + for isolated_node in old_nodes_set.difference(&self.core.cluster_nodes_set) { + active_nodes_set.remove(&isolated_node); + } + let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(all_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(active_nodes_set.len())?, consensus_executor: 
ServersSetChangeAccessJob::new_on_master(admin_public, - all_nodes_set.clone(), - all_nodes_set.clone(), + old_nodes_set.clone(), + old_nodes_set, new_nodes_set, old_set_signature, new_set_signature), consensus_transport: self.core.transport.clone(), })?; - consensus_session.initialize(all_nodes_set)?; + consensus_session.initialize(active_nodes_set)?; data.consensus_session = Some(consensus_session); data.remove_confirmations_to_receive = Some(shares_to_remove.clone()); data.shares_to_remove = Some(shares_to_remove); @@ -237,7 +262,7 @@ impl SessionImpl where T: SessionTransport { let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?; let current_nodes_set = self.core.key_share.id_numbers.keys().cloned().collect(); data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len()), + meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len())?, consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set), consensus_transport: self.core.transport.clone(), })?); @@ -360,9 +385,13 @@ impl SessionImpl where T: SessionTransport { { let shares_to_remove = data.shares_to_remove.as_ref() .expect("shares_to_remove is filled when consensus is established; on_consensus_established is called after consensus is established; qed"); - if !shares_to_remove.contains(&core.meta.self_node_id) { + let remove_confirmations_to_receive: BTreeSet<_> = shares_to_remove.iter() + .filter(|n| core.cluster_nodes_set.contains(n)) + .cloned() + .collect(); + if !shares_to_remove.contains(&core.meta.self_node_id) && !remove_confirmations_to_receive.is_empty() { // remember remove confirmations to receive - data.remove_confirmations_to_receive = Some(shares_to_remove.iter().cloned().collect()); + data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive); return Ok(()); } } @@ -375,7 +404,7 @@ impl SessionImpl where 
T: SessionTransport { fn disseminate_share_remove_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { let shares_to_remove = data.shares_to_remove.as_ref() .expect("shares_to_remove is filled when consensus is established; disseminate_share_remove_requests is called after consensus is established; qed"); - for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id) { + for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) { core.transport.send(node, ShareRemoveMessage::ShareRemoveRequest(ShareRemoveRequest { session: core.meta.id.clone().into(), session_nonce: core.nonce, @@ -396,7 +425,7 @@ impl SessionImpl where T: SessionTransport { if shares_to_remove.contains(&core.meta.self_node_id) { // send confirmation to all other nodes let new_nodes_set = core.key_share.id_numbers.keys().filter(|n| !shares_to_remove.contains(n)).collect::>(); - for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id) { + for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) { core.transport.send(&node, ShareRemoveMessage::ShareRemoveConfirm(ShareRemoveConfirm { session: core.meta.id.clone().into(), session_nonce: core.nonce, @@ -527,7 +556,6 @@ mod tests { use std::collections::{VecDeque, BTreeMap, BTreeSet}; use ethkey::{Random, Generator, Public, Signature, KeyPair, sign}; use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage}; - use key_server_cluster::cluster::Cluster; use key_server_cluster::cluster_sessions::ClusterSession; use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids}; @@ -556,7 +584,7 @@ mod tests { pub queue: VecDeque<(NodeId, NodeId, Message)>, } - fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, 
key_storage: Arc) -> SessionImpl { + fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc, key_storage: Arc, all_cluster_nodes: BTreeSet) -> SessionImpl { let session_id = meta.id.clone(); meta.self_node_id = self_node_id; SessionImpl::new(SessionParams { @@ -564,15 +592,16 @@ mod tests { transport: IsolatedSessionTransport::new(session_id, 1, cluster), key_storage: key_storage, admin_public: Some(admin_public), + cluster_nodes_set: all_cluster_nodes, nonce: 1, }).unwrap() } - fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode) -> Node { + fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode, all_nodes_set: BTreeSet) -> Node { Node { cluster: node.cluster.clone(), key_storage: node.key_storage.clone(), - session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage), + session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage, all_nodes_set), } } @@ -600,7 +629,7 @@ mod tests { .filter(|n| !shares_to_remove.contains(n)) .cloned() .collect(); - let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1)); + let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1, old_nodes_set.clone())); let nodes = nodes.map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect(); let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap(); @@ -687,7 +716,7 @@ mod tests { let t = 1; let test_cases = vec![(3, 1), (5, 3)]; for (n, nodes_to_remove) in test_cases { - // generate key && prepare ShareMove sessions + // generate key && prepare ShareRemove sessions let old_nodes_set = generate_nodes_ids(n); let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); let nodes_to_remove: BTreeSet<_> = 
old_nodes_set.iter().cloned().take(nodes_to_remove).collect(); @@ -715,7 +744,7 @@ mod tests { let t = 1; let test_cases = vec![(3, 1), (5, 3)]; for (n, nodes_to_remove) in test_cases { - // generate key && prepare ShareMove sessions + // generate key && prepare ShareRemove sessions let old_nodes_set = generate_nodes_ids(n); let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect(); @@ -737,4 +766,63 @@ mod tests { .collect()); } } + + #[test] + fn nodes_are_removed_even_if_some_other_nodes_are_isolated_from_cluster() { + let t = 1; + let (n, nodes_to_remove, nodes_to_isolate) = (5, 1, 2); + + // generate key && prepare ShareRemove sessions + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect(); + let nodes_to_isolate: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1 + nodes_to_remove.len()).take(nodes_to_isolate).collect(); + let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + + // simulate node failure - isolate nodes (it is removed from cluster completely, but it is still a part of session) + for node_to_isolate in &nodes_to_isolate { + ml.nodes.remove(node_to_isolate); + } + for node in ml.nodes.values_mut() { + for node_to_isolate in &nodes_to_isolate { + node.session.core.cluster_nodes_set.remove(node_to_isolate); + node.cluster.remove_node(node_to_isolate); + } + } + + // initialize session on master node && run to completion + ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + } + + #[test] + fn nodes_are_removed_even_if_isolated_from_cluster() { + let t = 1; + let (n, nodes_to_isolate_and_remove) = (5, 3); + 
+ // generate key && prepare ShareRemove sessions + let old_nodes_set = generate_nodes_ids(n); + let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap(); + let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_isolate_and_remove).collect(); + let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone()); + + // simulate node failure - isolate nodes (it is removed from cluster completely, but it is still a part of session) + for node_to_isolate in &nodes_to_remove { + ml.nodes.remove(node_to_isolate); + } + for node in ml.nodes.values_mut() { + for node_to_isolate in &nodes_to_remove { + node.session.core.cluster_nodes_set.remove(node_to_isolate); + node.cluster.remove_node(node_to_isolate); + } + } + + // initialize session on master node && run to completion + ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()), + Some(ml.old_set_signature.clone()), + Some(ml.new_set_signature.clone())).unwrap(); + ml.run(); + } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 74464a6a9..00ea03aca 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -1008,11 +1008,13 @@ impl ClusterCore { ConsensusMessageWithServersSet::InitializeConsensusSession(_) => true, _ => false, } => { + let mut all_cluster_nodes = data.connections.all_nodes(); + all_cluster_nodes.insert(data.self_key_pair.public().clone()); let mut connected_nodes = data.connections.connected_nodes(); connected_nodes.insert(data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); - match data.sessions.new_share_remove_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster) { + match data.sessions.new_share_remove_session(sender.clone(), session_id.clone(), Some(session_nonce), cluster, all_cluster_nodes) { Ok(session) => 
Ok(session), Err(err) => { // this is new session => it is not yet in container @@ -1149,6 +1151,10 @@ impl ClusterConnections { } } + pub fn all_nodes(&self) -> BTreeSet { + self.data.read().nodes.keys().cloned().collect() + } + pub fn connected_nodes(&self) -> BTreeSet { self.data.read().connections.keys().cloned().collect() } @@ -1413,11 +1419,13 @@ impl ClusterClient for ClusterClientImpl { } fn new_share_remove_session(&self, session_id: SessionId, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { + let mut all_cluster_nodes = self.data.connections.all_nodes(); + all_cluster_nodes.insert(self.data.self_key_pair.public().clone()); let mut connected_nodes = self.data.connections.connected_nodes(); connected_nodes.insert(self.data.self_key_pair.public().clone()); let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes)); - let session = self.data.sessions.new_share_remove_session(self.data.self_key_pair.public().clone(), session_id, None, cluster)?; + let session = self.data.sessions.new_share_remove_session(self.data.self_key_pair.public().clone(), session_id, None, cluster, all_cluster_nodes)?; session.as_share_remove() .expect("created 1 line above; qed") .initialize(Some(new_nodes_set), Some(old_set_signature), Some(new_set_signature))?; @@ -1425,11 +1433,11 @@ impl ClusterClient for ClusterClientImpl { } fn new_servers_set_change_session(&self, session_id: Option, new_nodes_set: BTreeSet, old_set_signature: Signature, new_set_signature: Signature) -> Result, Error> { - let mut connected_nodes = self.data.connections.connected_nodes(); - connected_nodes.insert(self.data.self_key_pair.public().clone()); + let mut all_cluster_nodes = self.data.connections.all_nodes(); + all_cluster_nodes.insert(self.data.self_key_pair.public().clone()); - let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); - let session = 
self.data.sessions.new_servers_set_change_session(self.data.self_key_pair.public().clone(), session_id, None, cluster, connected_nodes)?; + let cluster = Arc::new(ClusterView::new(self.data.clone(), all_cluster_nodes.clone())); + let session = self.data.sessions.new_servers_set_change_session(self.data.self_key_pair.public().clone(), session_id, None, cluster, all_cluster_nodes)?; let session_id = { let servers_set_change_session = session.as_servers_set_change().expect("created 1 line above; qed"); servers_set_change_session.initialize(new_nodes_set, old_set_signature, new_set_signature)?; @@ -1501,6 +1509,12 @@ pub mod tests { self.data.lock().nodes.push(node); } + pub fn remove_node(&self, node: &NodeId) { + let mut data = self.data.lock(); + let position = data.nodes.iter().position(|n| n == node).unwrap(); + data.nodes.remove(position); + } + pub fn take_message(&self) -> Option<(NodeId, Message)> { self.data.lock().messages.pop_front() } diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs b/secret_store/src/key_server_cluster/cluster_sessions.rs index aa6244dbc..e2f7b0621 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -433,7 +433,7 @@ impl ClusterSessions { } /// Create new share remove session. 
- pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { + pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { let nonce = self.check_session_nonce(&master, nonce)?; let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?; @@ -443,6 +443,7 @@ impl ClusterSessions { self_node_id: self.self_node_id.clone(), master_node_id: master, }, + cluster_nodes_set: all_nodes_set, transport: ShareRemoveTransport::new(session_id.clone(), nonce, cluster), key_storage: self.key_storage.clone(), admin_public: Some(admin_public), @@ -464,6 +465,12 @@ impl ClusterSessions { /// Create new servers set change session. pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option, nonce: Option, cluster: Arc, all_nodes_set: BTreeSet) -> Result, Error> { + // communicating to all other nodes is crucial for ServersSetChange session + // => check that we have connections to all cluster nodes + if self.nodes.iter().any(|n| !cluster.is_connected(n)) { + return Err(Error::NodeDisconnected); + } + let session_id = match session_id { Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID { session_id diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index 527cf3c2a..ce588313b 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -632,6 +632,8 @@ pub struct InitializeShareChangeSession { pub master_node_id: MessageNodeId, /// Old nodes set. pub old_shares_set: BTreeSet, + /// Isolated nodes. + pub isolated_nodes: BTreeSet, /// Shares to add. Values are filled for new nodes only. pub shares_to_add: BTreeMap, /// Shares to move. 
From 4e9d439f3907fdabe821450b9ffd91e65d319660 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 5 Oct 2017 23:38:23 +0300 Subject: [PATCH 16/22] SecretStore: session level timeout (#6631) * SecretStore: session level timeout * removed obsolete TODO --- .../src/key_server_cluster/cluster.rs | 27 +++++---- .../key_server_cluster/cluster_sessions.rs | 58 +++++++++++++++++-- .../src/key_server_cluster/message.rs | 2 + 3 files changed, 71 insertions(+), 16 deletions(-) diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 00ea03aca..acfc116ac 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -357,6 +357,7 @@ impl ClusterCore { /// Send keepalive messages to every othe node. fn keep_alive(data: Arc) { + data.sessions.sessions_keep_alive(); for connection in data.connections.active_connections() { let last_message_diff = time::Instant::now() - connection.last_message_time(); if last_message_diff > time::Duration::from_secs(KEEP_ALIVE_DISCONNECT_INTERVAL) { @@ -460,7 +461,7 @@ impl ClusterCore { } }, _ => { - data.sessions.generation_sessions.get(&session_id) + data.sessions.generation_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -538,7 +539,7 @@ impl ClusterCore { } }, _ => { - data.sessions.encryption_sessions.get(&session_id) + data.sessions.encryption_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -629,7 +630,7 @@ impl ClusterCore { } }, _ => { - data.sessions.decryption_sessions.get(&decryption_session_id) + data.sessions.decryption_sessions.get(&decryption_session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -705,7 +706,7 @@ impl ClusterCore { } }, _ => { - data.sessions.signing_sessions.get(&signing_session_id) + data.sessions.signing_sessions.get(&signing_session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -784,7 +785,7 @@ impl ClusterCore { } }, _ => { - 
data.sessions.admin_sessions.get(&session_id) + data.sessions.admin_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -865,7 +866,7 @@ impl ClusterCore { } }, _ => { - data.sessions.admin_sessions.get(&session_id) + data.sessions.admin_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -946,7 +947,7 @@ impl ClusterCore { } }, _ => { - data.sessions.admin_sessions.get(&session_id) + data.sessions.admin_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -1029,7 +1030,7 @@ impl ClusterCore { } }, _ => { - data.sessions.admin_sessions.get(&session_id) + data.sessions.admin_sessions.get(&session_id, true) .ok_or(Error::InvalidSessionId) }, }; @@ -1084,8 +1085,12 @@ impl ClusterCore { /// Process single cluster message from the connection. fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { match message { - ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {})))), - ClusterMessage::KeepAliveResponse(_) => (), + ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { + session_id: None, + })))), + ClusterMessage::KeepAliveResponse(msg) => if let Some(session_id) = msg.session_id { + data.sessions.on_session_keep_alive(connection.node_id(), session_id.into()); + }, _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()), } } @@ -1459,7 +1464,7 @@ impl ClusterClient for ClusterClientImpl { #[cfg(test)] fn generation_session(&self, session_id: &SessionId) -> Option> { - self.data.sessions.generation_sessions.get(session_id) + self.data.sessions.generation_sessions.get(session_id, false) } } diff --git a/secret_store/src/key_server_cluster/cluster_sessions.rs 
b/secret_store/src/key_server_cluster/cluster_sessions.rs index e2f7b0621..f22b6a762 100644 --- a/secret_store/src/key_server_cluster/cluster_sessions.rs +++ b/secret_store/src/key_server_cluster/cluster_sessions.rs @@ -47,6 +47,8 @@ use key_server_cluster::admin_sessions::ShareChangeSessionMeta; /// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores /// session messages. const SESSION_TIMEOUT_INTERVAL: u64 = 60; +/// Interval to send session-level KeepAlive-messages. +const SESSION_KEEP_ALIVE_INTERVAL: u64 = 30; lazy_static! { /// Servers set change session id (there could be at most 1 session => hardcoded id). @@ -129,6 +131,8 @@ pub struct QueuedSession { pub master: NodeId, /// Cluster view. pub cluster_view: Arc, + /// Last keep alive time. + pub last_keep_alive_time: time::Instant, /// Last received message time. pub last_message_time: time::Instant, /// Generation session. @@ -224,6 +228,18 @@ impl ClusterSessions { self.make_faulty_generation_sessions.store(true, Ordering::Relaxed); } + /// Send session-level keep-alive messages. + pub fn sessions_keep_alive(&self) { + self.admin_sessions.send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id); + } + + /// When session-level keep-alive response is received. + pub fn on_session_keep_alive(&self, sender: &NodeId, session_id: SessionId) { + if session_id == *SERVERS_SET_CHANGE_SESSION_ID { + self.admin_sessions.on_keep_alive(&session_id, sender); + } + } + /// Create new generation session. 
pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option, cluster: Arc) -> Result, Error> { // check that there's no finished encryption session with the same id @@ -514,9 +530,6 @@ impl ClusterSessions { self.encryption_sessions.stop_stalled_sessions(); self.decryption_sessions.stop_stalled_sessions(); self.signing_sessions.stop_stalled_sessions(); - // TODO: servers set change session could take a lot of time - // && during that session some nodes could not receive messages - // => they could stop session as stalled. This must be handled self.admin_sessions.stop_stalled_sessions(); } @@ -571,8 +584,15 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster self.sessions.read().is_empty() } - pub fn get(&self, session_id: &K) -> Option> { - self.sessions.read().get(session_id).map(|s| s.session.clone()) + pub fn get(&self, session_id: &K, update_last_message_time: bool) -> Option> { + let mut sessions = self.sessions.write(); + sessions.get_mut(session_id) + .map(|s| { + if update_last_message_time { + s.last_message_time = time::Instant::now(); + } + s.session.clone() + }) } pub fn insert Result>(&self, master: NodeId, session_id: K, cluster: Arc, is_exclusive_session: bool, session: F) -> Result, Error> { @@ -590,6 +610,7 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster let queued_session = QueuedSession { master: master, cluster_view: cluster, + last_keep_alive_time: time::Instant::now(), last_message_time: time::Instant::now(), session: session.clone(), queue: VecDeque::new(), @@ -649,6 +670,33 @@ impl ClusterSessionsContainer where K: Clone + Ord, V: Cluster } } +impl ClusterSessionsContainer where K: Clone + Ord, V: ClusterSession, SessionId: From { + pub fn send_keep_alive(&self, session_id: &K, self_node_id: &NodeId) { + if let Some(session) = self.sessions.write().get_mut(session_id) { + let now = time::Instant::now(); + if self_node_id == &session.master && now - session.last_keep_alive_time > 
time::Duration::from_secs(SESSION_KEEP_ALIVE_INTERVAL) { + session.last_keep_alive_time = now; + // since we send KeepAlive message to prevent nodes from disconnecting + // && worst thing that can happen if node is disconnected is that session is failed + // => ignore error here, because probably this node is not need for the rest of the session at all + let _ = session.cluster_view.broadcast(Message::Cluster(message::ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { + session_id: Some(session_id.clone().into()), + }))); + } + } + } + + pub fn on_keep_alive(&self, session_id: &K, sender: &NodeId) { + if let Some(session) = self.sessions.write().get_mut(session_id) { + let now = time::Instant::now(); + // we only accept keep alive from master node of ServersSetChange session + if sender == &session.master { + session.last_keep_alive_time = now; + } + } + } +} + impl ClusterSessionsContainerState { /// When session is starting. pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> { diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index ce588313b..873852027 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -255,6 +255,8 @@ pub struct KeepAlive { /// Confirm that the node is still alive. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeepAliveResponse { + /// Session id, if used for session-level keep alive. + pub session_id: Option, } /// Initialize new DKG session. 
From 77a2c77c45fd4383b1638e223a3085974791852a Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 6 Oct 2017 09:11:17 +0200 Subject: [PATCH 17/22] Renamed RPC receipt statusCode field to status (#6650) --- rpc/src/v1/tests/mocked/eth.rs | 2 +- rpc/src/v1/types/receipt.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 1a0ceb5b2..8bd97108c 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -1020,7 +1020,7 @@ fn rpc_eth_transaction_receipt() { "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","contractAddress":null,"cumulativeGasUsed":"0x20","gasUsed":"0x10","logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","data":"0x","logIndex":"0x1","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"}],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","root":"0x0000000000000000000000000000000000000000000000000000000000000000
","statusCode":null,"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","contractAddress":null,"cumulativeGasUsed":"0x20","gasUsed":"0x10","logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","data":"0x","logIndex":"0x1","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","transactionLogIndex":"0x0","type":"mined"}],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","root":"0x0000000000000000000000000000000000000000000000000000000000000000","status":null,"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0"},"id":1}"#; assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/types/receipt.rs b/rpc/src/v1/types/receipt.rs index ca313a440..bb8af2cd0 100644 --- a/rpc/src/v1/types/receipt.rs +++ b/rpc/src/v1/types/receipt.rs @@ -50,7 +50,7 @@ pub struct Receipt { #[serde(rename="logsBloom")] pub logs_bloom: H2048, /// Status code - 
#[serde(rename="statusCode")] + #[serde(rename="status")] pub status_code: Option, } @@ -131,7 +131,7 @@ mod tests { #[test] fn receipt_serialization() { - let s = r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined"}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","statusCode":null}"#; + let s = 
r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined"}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","status":null}"#; let receipt = Receipt { transaction_hash: Some(0.into()), From 1b45870af812e17a7a24f6d03dd334c7da281e82 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 6 Oct 2017 13:46:11 +0200 Subject: [PATCH 18/22] Tweaked block download timeouts (#6595) (#6655) --- sync/src/block_sync.rs | 2 +- sync/src/chain.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/block_sync.rs b/sync/src/block_sync.rs index 68fa65564..fbd7eddae 100644 --- a/sync/src/block_sync.rs +++ b/sync/src/block_sync.rs @@ -32,7 +32,7 @@ use sync_io::SyncIo; use 
blocks::BlockCollection; const MAX_HEADERS_TO_REQUEST: usize = 128; -const MAX_BODIES_TO_REQUEST: usize = 64; +const MAX_BODIES_TO_REQUEST: usize = 32; const MAX_RECEPITS_TO_REQUEST: usize = 128; const SUBCHAIN_SIZE: u64 = 256; const MAX_ROUND_PARENTS: usize = 16; diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 05e979bbd..7423f8254 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -170,7 +170,7 @@ const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3; const WAIT_PEERS_TIMEOUT_SEC: u64 = 5; const STATUS_TIMEOUT_SEC: u64 = 5; const HEADERS_TIMEOUT_SEC: u64 = 15; -const BODIES_TIMEOUT_SEC: u64 = 10; +const BODIES_TIMEOUT_SEC: u64 = 20; const RECEIPTS_TIMEOUT_SEC: u64 = 10; const FORK_HEADER_TIMEOUT_SEC: u64 = 3; const SNAPSHOT_MANIFEST_TIMEOUT_SEC: u64 = 5; From 59365b0133be777b1a1ec6cd4e1b64596eb04eb9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 6 Oct 2017 17:36:26 +0200 Subject: [PATCH 19/22] fix aura backcompat: revert to manual encoding/decoding of transition proofs (#6665) --- ethcore/src/engines/epoch.rs | 38 ++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/ethcore/src/engines/epoch.rs b/ethcore/src/engines/epoch.rs index b5ffd8a2d..c765f727d 100644 --- a/ethcore/src/engines/epoch.rs +++ b/ethcore/src/engines/epoch.rs @@ -18,8 +18,10 @@ use bigint::hash::H256; +use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; + /// A full epoch transition. -#[derive(Debug, Clone, RlpEncodable, RlpDecodable)] +#[derive(Debug, Clone)] pub struct Transition { /// Block hash at which the transition occurred. 
pub block_hash: H256, @@ -29,14 +31,46 @@ pub struct Transition { pub proof: Vec, } +impl Encodable for Transition { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.block_hash) + .append(&self.block_number) + .append(&self.proof); + } +} + +impl Decodable for Transition { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(Transition { + block_hash: rlp.val_at(0)?, + block_number: rlp.val_at(1)?, + proof: rlp.val_at(2)?, + }) + } +} + /// An epoch transition pending a finality proof. /// Not all transitions need one. -#[derive(RlpEncodableWrapper, RlpDecodableWrapper)] pub struct PendingTransition { /// "transition/epoch" proof from the engine. pub proof: Vec, } +impl Encodable for PendingTransition { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.proof); + } +} + +impl Decodable for PendingTransition { + fn decode(rlp: &UntrustedRlp) -> Result { + Ok(PendingTransition { + proof: rlp.as_val()?, + }) + } +} + /// Verifier for all blocks within an epoch with self-contained state. pub trait EpochVerifier: Send + Sync { /// Lightly verify the next block header. From 360ecd37283d3a156f217ab4ae76e3e355c8b2a4 Mon Sep 17 00:00:00 2001 From: Afri Schoedon <5chdn@users.noreply.github.com> Date: Sun, 8 Oct 2017 18:17:59 +0200 Subject: [PATCH 20/22] Add Musicoin and MCIP-3 UBI hardfork. (#6621) * Add musicoin chain spec. * Add musicoin to parity node * Add musicoin to the wallet * Add i18n for musicoin * Align musicoin chain spec with 1.8, ref #6134 * Update musicoin bootnodes * Prepare MCIP-3 in musicoin chain spec. * Update musicoin chain spec with contract addresses for MCIP-3 * Extend ethash params by MCIP-3 * Fix musicoin chain spec json * Use U256 for block rewards. * Update musicoin registrar * Fix merge leftovers * Update musicoin chain spec for latest master * Bestow MCIP-3 block reward(s). 
* Update musicoin registry once and for all * Align MCIP-3 block reward with go implementation * Remove mcip3 test chain spec from repository * Update MCIP-3 block rewards * Musicoin homestead transition is at 1_150_000 * Expect mcip3 transtion to be properly defined in chain spec. * Panic handling for mcip to default to regular block rewards if not specified * Giving mcip3 rewards a useful default value. * Fix ethjson tests. * Update musicoin chain spec * Fix tests 0:) * Add musicoin mcip3 era test spec. * Update musicoin chain spec(s) * Add tests for mcip3 era block rewards * Fix tests * Disable byzantium for musicoin * Pass miner reward to the tracer. * Allow modifying blockreward in MCIP-3 transition. --- ethcore/res/ethereum/classic.json | 2 - ethcore/res/ethereum/mcip3_test.json | 167 ++++++++++++++++++ ethcore/res/ethereum/musicoin.json | 167 ++++++++++++++++++ ethcore/src/ethereum/ethash.rs | 62 ++++++- ethcore/src/ethereum/mod.rs | 11 ++ ethcore/src/tests/helpers.rs | 6 + js/src/i18n/_default/settings.js | 1 + js/src/i18n/nl/settings.js | 1 + js/src/i18n/zh-Hant-TW/settings.js | 1 + js/src/i18n/zh/settings.js | 1 + js/src/jsonrpc/interfaces/parity.js | 2 +- .../CurrencySymbol/currencySymbol.example.js | 6 + js/src/ui/CurrencySymbol/currencySymbol.js | 4 + .../ui/CurrencySymbol/currencySymbol.spec.js | 8 + js/src/views/Settings/Parity/parity.js | 8 + json/src/spec/ethash.rs | 32 ++++ parity/cli/mod.rs | 2 +- parity/params.rs | 6 + rpc/src/v1/traits/parity_set.rs | 2 +- 19 files changed, 478 insertions(+), 11 deletions(-) create mode 100644 ethcore/res/ethereum/mcip3_test.json create mode 100644 ethcore/res/ethereum/musicoin.json diff --git a/ethcore/res/ethereum/classic.json b/ethcore/res/ethereum/classic.json index 5f931cf8b..5f6e3af83 100644 --- a/ethcore/res/ethereum/classic.json +++ b/ethcore/res/ethereum/classic.json @@ -14,7 +14,6 @@ "ecip1010PauseTransition": 3000000, "ecip1010ContinueTransition": 5000000, "ecip1017EraRounds": 5000000, - 
"eip161abcTransition": "0x7fffffffffffffff", "eip161dTransition": "0x7fffffffffffffff" } @@ -31,7 +30,6 @@ "forkBlock": "0x1d4c00", "forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f", "eip155Transition": 3000000, - "eip98Transition": "0x7fffffffffffff", "eip86Transition": "0x7fffffffffffff" }, diff --git a/ethcore/res/ethereum/mcip3_test.json b/ethcore/res/ethereum/mcip3_test.json new file mode 100644 index 000000000..098e146e3 --- /dev/null +++ b/ethcore/res/ethereum/mcip3_test.json @@ -0,0 +1,167 @@ +{ + "name":"MCIP3 Test", + "dataDir":"mcip3test", + "engine":{ + "Ethash":{ + "params":{ + "minimumDifficulty":"0x020000", + "difficultyBoundDivisor":"0x0800", + "durationLimit":"0x0d", + "homesteadTransition":"0x118c30", + "eip100bTransition":"0x7fffffffffffff", + "eip150Transition":"0x7fffffffffffff", + "eip160Transition":"0x7fffffffffffff", + "eip161abcTransition":"0x7fffffffffffff", + "eip161dTransition":"0x7fffffffffffff", + "eip649Transition":"0x7fffffffffffff", + "blockReward":"0x1105a0185b50a80000", + "mcip3Transition":"0x00", + "mcip3MinerReward":"0xd8d726b7177a80000", + "mcip3UbiReward":"0x2b5e3af16b1880000", + "mcip3UbiContract":"0x00efdd5883ec628983e9063c7d969fe268bbf310", + "mcip3DevReward":"0xc249fdd327780000", + "mcip3DevContract":"0x00756cf8159095948496617f5fb17ed95059f536" + } + } + }, + "params":{ + "gasLimitBoundDivisor":"0x0400", + "registrar":"0x5C271c4C9A67E7D73b7b3669d47504741354f21D", + "accountStartNonce":"0x00", + "maximumExtraDataSize":"0x20", + "minGasLimit":"0x1388", + "networkID":"0x76740b", + "forkBlock":"0x5b6", + "forkCanonHash":"0xa5e88ad9e34d113e264e307bc27e8471452c8fc13780324bb3abb96fd0558343", + "eip86Transition":"0x7fffffffffffff", + "eip98Transition":"0x7fffffffffffff", + "eip140Transition":"0x7fffffffffffff", + "eip155Transition":"0x7fffffffffffff", + "eip211Transition":"0x7fffffffffffff", + "eip214Transition":"0x7fffffffffffff", + "eip658Transition":"0x7fffffffffffff", + 
"maxCodeSize":"0x6000" + }, + "genesis":{ + "seal":{ + "ethereum":{ + "nonce":"0x000000000000002a", + "mixHash":"0x00000000000000000000000000000000000000647572616c65787365646c6578" + } + }, + "difficulty":"0x3d0900", + "author":"0x0000000000000000000000000000000000000000", + "timestamp":"0x00", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData":"", + "gasLimit":"0x7a1200" + }, + "nodes":[ + "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", + "enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303", + "enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303", + "enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303", + "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", + "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303", + "enode://d302f52c8789ad87ee528f1431a67f1aa646c9bec17babb4665dfb3d61de5b9119a70aa77b2147a5f28854092ba09769323c1c552a6ac6f6a34cbcf767e2d2fe@158.69.248.48:30303", + "enode://c72564bce8331ae298fb8ece113a456e3927d7e5989c2be3e445678b3600579f722410ef9bbfe339335d676af77343cb21b5b1703b7bebc32be85fce937a2220@191.252.185.71:30303", + "enode://e3ae4d25ee64791ff98bf17c37acf90933359f2505c00f65c84f6863231a32a94153cadb0a462e428f18f35ded6bd91cd91033d26576a28558c22678be9cfaee@5.63.158.137:35555" + ], + "accounts":{ + "0000000000000000000000000000000000000001":{ + "balance":"1", + "builtin":{ + 
"name":"ecrecover", + "pricing":{ + "linear":{ + "base":3000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000002":{ + "balance":"1", + "builtin":{ + "name":"sha256", + "pricing":{ + "linear":{ + "base":60, + "word":12 + } + } + } + }, + "0000000000000000000000000000000000000003":{ + "balance":"1", + "builtin":{ + "name":"ripemd160", + "pricing":{ + "linear":{ + "base":600, + "word":120 + } + } + } + }, + "0000000000000000000000000000000000000004":{ + "balance":"1", + "builtin":{ + "name":"identity", + "pricing":{ + "linear":{ + "base":15, + "word":3 + } + } + } + }, + "0000000000000000000000000000000000000005":{ + "builtin":{ + "name":"modexp", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "modexp":{ + "divisor":20 + } + } + } + }, + "0000000000000000000000000000000000000006":{ + "builtin":{ + "name":"alt_bn128_add", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "linear":{ + "base":500, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000007":{ + "builtin":{ + "name":"alt_bn128_mul", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "linear":{ + "base":40000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000008":{ + "builtin":{ + "name":"alt_bn128_pairing", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "alt_bn128_pairing":{ + "base":100000, + "pair":80000 + } + } + } + } + } +} diff --git a/ethcore/res/ethereum/musicoin.json b/ethcore/res/ethereum/musicoin.json new file mode 100644 index 000000000..cf4d4ffba --- /dev/null +++ b/ethcore/res/ethereum/musicoin.json @@ -0,0 +1,167 @@ +{ + "name":"Musicoin", + "dataDir":"musicoin", + "engine":{ + "Ethash":{ + "params":{ + "minimumDifficulty":"0x020000", + "difficultyBoundDivisor":"0x0800", + "durationLimit":"0x0d", + "homesteadTransition":"0x118c30", + "eip100bTransition":"0x7fffffffffffff", + "eip150Transition":"0x7fffffffffffff", + "eip160Transition":"0x7fffffffffffff", + "eip161abcTransition":"0x7fffffffffffff", + 
"eip161dTransition":"0x7fffffffffffff", + "eip649Transition":"0x7fffffffffffff", + "blockReward":"0x1105a0185b50a80000", + "mcip3Transition":"0x124f81", + "mcip3MinerReward":"0xd8d726b7177a80000", + "mcip3UbiReward":"0x2b5e3af16b1880000", + "mcip3UbiContract":"0x00efdd5883ec628983e9063c7d969fe268bbf310", + "mcip3DevReward":"0xc249fdd327780000", + "mcip3DevContract":"0x00756cf8159095948496617f5fb17ed95059f536" + } + } + }, + "params":{ + "gasLimitBoundDivisor":"0x0400", + "registrar":"0x5C271c4C9A67E7D73b7b3669d47504741354f21D", + "accountStartNonce":"0x00", + "maximumExtraDataSize":"0x20", + "minGasLimit":"0x1388", + "networkID":"0x76740f", + "forkBlock":"0x5b6", + "forkCanonHash":"0xa5e88ad9e34d113e264e307bc27e8471452c8fc13780324bb3abb96fd0558343", + "eip86Transition":"0x7fffffffffffff", + "eip98Transition":"0x7fffffffffffff", + "eip140Transition":"0x7fffffffffffff", + "eip155Transition":"0x7fffffffffffff", + "eip211Transition":"0x7fffffffffffff", + "eip214Transition":"0x7fffffffffffff", + "eip658Transition":"0x7fffffffffffff", + "maxCodeSize":"0x6000" + }, + "genesis":{ + "seal":{ + "ethereum":{ + "nonce":"0x000000000000002a", + "mixHash":"0x00000000000000000000000000000000000000647572616c65787365646c6578" + } + }, + "difficulty":"0x3d0900", + "author":"0x0000000000000000000000000000000000000000", + "timestamp":"0x00", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData":"", + "gasLimit":"0x7a1200" + }, + "nodes":[ + "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", + "enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303", + "enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303", + 
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303", + "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", + "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303", + "enode://d302f52c8789ad87ee528f1431a67f1aa646c9bec17babb4665dfb3d61de5b9119a70aa77b2147a5f28854092ba09769323c1c552a6ac6f6a34cbcf767e2d2fe@158.69.248.48:30303", + "enode://c72564bce8331ae298fb8ece113a456e3927d7e5989c2be3e445678b3600579f722410ef9bbfe339335d676af77343cb21b5b1703b7bebc32be85fce937a2220@191.252.185.71:30303", + "enode://e3ae4d25ee64791ff98bf17c37acf90933359f2505c00f65c84f6863231a32a94153cadb0a462e428f18f35ded6bd91cd91033d26576a28558c22678be9cfaee@5.63.158.137:35555" + ], + "accounts":{ + "0000000000000000000000000000000000000001":{ + "balance":"1", + "builtin":{ + "name":"ecrecover", + "pricing":{ + "linear":{ + "base":3000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000002":{ + "balance":"1", + "builtin":{ + "name":"sha256", + "pricing":{ + "linear":{ + "base":60, + "word":12 + } + } + } + }, + "0000000000000000000000000000000000000003":{ + "balance":"1", + "builtin":{ + "name":"ripemd160", + "pricing":{ + "linear":{ + "base":600, + "word":120 + } + } + } + }, + "0000000000000000000000000000000000000004":{ + "balance":"1", + "builtin":{ + "name":"identity", + "pricing":{ + "linear":{ + "base":15, + "word":3 + } + } + } + }, + "0000000000000000000000000000000000000005":{ + "builtin":{ + "name":"modexp", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "modexp":{ + "divisor":20 + } + } + } + }, + "0000000000000000000000000000000000000006":{ + "builtin":{ + "name":"alt_bn128_add", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "linear":{ + "base":500, + 
"word":0 + } + } + } + }, + "0000000000000000000000000000000000000007":{ + "builtin":{ + "name":"alt_bn128_mul", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "linear":{ + "base":40000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000008":{ + "builtin":{ + "name":"alt_bn128_pairing", + "activate_at":"0x7fffffffffffff", + "pricing":{ + "alt_bn128_pairing":{ + "base":100000, + "pair":80000 + } + } + } + } + } +} diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index e0a85ab9f..a49ba2e2a 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -22,6 +22,7 @@ use hash::{KECCAK_EMPTY_LIST_RLP}; use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor}; use bigint::prelude::U256; use bigint::hash::{H256, H64}; +use util::Address; use unexpected::{OutOfBounds, Mismatch}; use block::*; use error::{BlockError, Error}; @@ -69,6 +70,18 @@ pub struct EthashParams { pub ecip1010_continue_transition: u64, /// Total block number for one ECIP-1017 era. pub ecip1017_era_rounds: u64, + /// Number of first block where MCIP-3 begins. + pub mcip3_transition: u64, + /// MCIP-3 Block reward coin-base for miners. + pub mcip3_miner_reward: U256, + /// MCIP-3 Block reward ubi-base for basic income. + pub mcip3_ubi_reward: U256, + /// MCIP-3 contract address for universal basic income. + pub mcip3_ubi_contract: Address, + /// MCIP-3 Block reward dev-base for dev funds. + pub mcip3_dev_reward: U256, + /// MCIP-3 contract address for the developer funds. + pub mcip3_dev_contract: Address, /// Block reward in base units. pub block_reward: U256, /// EIP-649 transition block. 
@@ -95,6 +108,12 @@ impl From for EthashParams { ecip1010_pause_transition: p.ecip1010_pause_transition.map_or(u64::max_value(), Into::into), ecip1010_continue_transition: p.ecip1010_continue_transition.map_or(u64::max_value(), Into::into), ecip1017_era_rounds: p.ecip1017_era_rounds.map_or(u64::max_value(), Into::into), + mcip3_transition: p.mcip3_transition.map_or(u64::max_value(), Into::into), + mcip3_miner_reward: p.mcip3_miner_reward.map_or_else(Default::default, Into::into), + mcip3_ubi_reward: p.mcip3_ubi_reward.map_or(U256::from(0), Into::into), + mcip3_ubi_contract: p.mcip3_ubi_contract.map_or_else(Address::new, Into::into), + mcip3_dev_reward: p.mcip3_dev_reward.map_or(U256::from(0), Into::into), + mcip3_dev_contract: p.mcip3_dev_contract.map_or_else(Address::new, Into::into), block_reward: p.block_reward.map_or_else(Default::default, Into::into), eip649_transition: p.eip649_transition.map_or(u64::max_value(), Into::into), eip649_delay: p.eip649_delay.map_or(DEFAULT_EIP649_DELAY, Into::into), @@ -184,24 +203,38 @@ impl Engine for Arc { let author = *LiveBlock::header(&*block).author(); let number = LiveBlock::header(&*block).number(); + // Applies EIP-649 reward. let reward = if number >= self.ethash_params.eip649_transition { self.ethash_params.eip649_reward.unwrap_or(self.ethash_params.block_reward) } else { self.ethash_params.block_reward }; + // Applies ECIP-1017 eras. let eras_rounds = self.ethash_params.ecip1017_era_rounds; let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, reward, number); let n_uncles = LiveBlock::uncles(&*block).len(); - // Bestow block reward - let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); + // Bestow block rewards. 
+ let mut result_block_reward = reward + reward.shr(5) * U256::from(n_uncles); let mut uncle_rewards = Vec::with_capacity(n_uncles); - self.machine.add_balance(block, &author, &result_block_reward)?; + if number >= self.ethash_params.mcip3_transition { + result_block_reward = self.ethash_params.mcip3_miner_reward; + let ubi_contract = self.ethash_params.mcip3_ubi_contract; + let ubi_reward = self.ethash_params.mcip3_ubi_reward; + let dev_contract = self.ethash_params.mcip3_dev_contract; + let dev_reward = self.ethash_params.mcip3_dev_reward; - // bestow uncle rewards. + self.machine.add_balance(block, &author, &result_block_reward)?; + self.machine.add_balance(block, &ubi_contract, &ubi_reward)?; + self.machine.add_balance(block, &dev_contract, &dev_reward)?; + } else { + self.machine.add_balance(block, &author, &result_block_reward)?; + } + + // Bestow uncle rewards. for u in LiveBlock::uncles(&*block) { let uncle_author = u.author(); let result_uncle_reward = if eras == 0 { @@ -217,7 +250,7 @@ impl Engine for Arc { self.machine.add_balance(block, a, reward)?; } - // note and trace. + // Note and trace. 
self.machine.note_rewards(block, &[(author, result_block_reward)], &uncle_rewards) } @@ -432,7 +465,7 @@ mod tests { use error::{BlockError, Error}; use header::Header; use spec::Spec; - use super::super::{new_morden, new_homestead_test_machine}; + use super::super::{new_morden, new_mcip3_test, new_homestead_test_machine}; use super::{Ethash, EthashParams, ecip1017_eras_block_reward}; use rlp; @@ -502,6 +535,23 @@ mod tests { assert_eq!(b.state().balance(&uncle_author).unwrap(), "3cb71f51fc558000".into()); } + #[test] + fn has_valid_mcip3_era_block_rewards() { + let spec = new_mcip3_test(); + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); + let b = b.close(); + + let ubi_contract: Address = "00efdd5883ec628983e9063c7d969fe268bbf310".into(); + let dev_contract: Address = "00756cf8159095948496617f5fb17ed95059f536".into(); + assert_eq!(b.state().balance(&Address::zero()).unwrap(), U256::from_str("d8d726b7177a80000").unwrap()); + assert_eq!(b.state().balance(&ubi_contract).unwrap(), U256::from_str("2b5e3af16b1880000").unwrap()); + assert_eq!(b.state().balance(&dev_contract).unwrap(), U256::from_str("c249fdd327780000").unwrap()); + } + #[test] fn has_valid_metadata() { let engine = test_spec().engine; diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 0b45113d4..75c321ebe 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -70,6 +70,11 @@ pub fn new_expanse<'a, T: Into>>(params: T) -> Spec { load(params.into(), include_bytes!("../../res/ethereum/expanse.json")) } +/// Create a new Musicoin mainnet chain spec. 
+pub fn new_musicoin<'a, T: Into>>(params: T) -> Spec { + load(params.into(), include_bytes!("../../res/ethereum/musicoin.json")) +} + /// Create a new Kovan testnet chain spec. pub fn new_kovan<'a, T: Into>>(params: T) -> Spec { load(params.into(), include_bytes!("../../res/ethereum/kovan.json")) @@ -111,6 +116,9 @@ pub fn new_byzantium_test() -> Spec { load(None, include_bytes!("../../res/ether /// Create a new Foundation Constantinople era spec. pub fn new_constantinople_test() -> Spec { load(None, include_bytes!("../../res/ethereum/constantinople_test.json")) } +/// Create a new Musicoin-MCIP3-era spec. +pub fn new_mcip3_test() -> Spec { load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) } + // For tests /// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead. @@ -125,6 +133,9 @@ pub fn new_byzantium_test_machine() -> EthereumMachine { load_machine(include_by /// Create a new Foundation Constantinople era spec. pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/constantinople_test.json")) } +/// Create a new Musicoin-MCIP3-era spec. 
+pub fn new_mcip3_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) } + #[cfg(test)] mod tests { use bigint::prelude::U256; diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index df5e83226..30e74e179 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -377,6 +377,12 @@ pub fn get_default_ethash_params() -> EthashParams { ecip1010_pause_transition: u64::max_value(), ecip1010_continue_transition: u64::max_value(), ecip1017_era_rounds: u64::max_value(), + mcip3_transition: u64::max_value(), + mcip3_miner_reward: 0.into(), + mcip3_ubi_reward: 0.into(), + mcip3_ubi_contract: "0000000000000000000000000000000000000001".into(), + mcip3_dev_reward: 0.into(), + mcip3_dev_contract: "0000000000000000000000000000000000000001".into(), eip649_transition: u64::max_value(), eip649_delay: 3_000_000, eip649_reward: None, diff --git a/js/src/i18n/_default/settings.js b/js/src/i18n/_default/settings.js index aef412b48..29f56badb 100644 --- a/js/src/i18n/_default/settings.js +++ b/js/src/i18n/_default/settings.js @@ -26,6 +26,7 @@ export default { chain_classic: `Parity syncs to the Ethereum Classic network`, chain_dev: `Parity uses a local development chain`, chain_expanse: `Parity syncs to the Expanse network`, + chain_musicoin: `Parity syncs to the Musicoin network`, chain_foundation: `Parity syncs to the Ethereum network launched by the Ethereum Foundation`, chain_kovan: `Parity syncs to the Kovan test network`, chain_olympic: `Parity syncs to the Olympic test network`, diff --git a/js/src/i18n/nl/settings.js b/js/src/i18n/nl/settings.js index f436d39c6..2aedd9d9b 100644 --- a/js/src/i18n/nl/settings.js +++ b/js/src/i18n/nl/settings.js @@ -26,6 +26,7 @@ export default { chain_classic: `Parity synchroniseert met het Ethereum Classic netwerk`, chain_dev: `Parity gebruikt een lokale ontwikkelaars chain`, chain_expanse: `Parity synchroniseert met het Expanse netwerk`, + 
chain_musicoin: `Parity synchroniseert met het Musicoin netwerk`, chain_foundation: `Parity synchroniseert met het Ethereum netwerk wat door de Ethereum Foundation is uitgebracht`, chain_kovan: `Parity synchroniseert met het Kovan test netwerk`, chain_olympic: `Parity synchroniseert met het Olympic test netwerk`, diff --git a/js/src/i18n/zh-Hant-TW/settings.js b/js/src/i18n/zh-Hant-TW/settings.js index 8841279dc..1bcb38758 100644 --- a/js/src/i18n/zh-Hant-TW/settings.js +++ b/js/src/i18n/zh-Hant-TW/settings.js @@ -30,6 +30,7 @@ export default { chain_classic: `將Parity同步至以太坊經典網路`, // Parity syncs to the Ethereum Classic network chain_dev: `將Parity使用一條本地開發用區塊鏈`, // Parity uses a local development chain chain_expanse: `將Parity同步至Expanse網路`, // Parity syncs to the Expanse network + chain_musicoin: `將Parity同步至Musicoin網路`, // Parity syncs to the Musicoin network chain_foundation: `將Parity同步至以太坊基金會發起的以太坊網路`, // Parity syncs to the Ethereum network launched by the Ethereum Foundation chain_kovan: `將Parity同步至Kovan測試網路`, // Parity syncs to the Kovan test network chain_olympic: `將Parity同步至Olympic測試網路`, // Parity syncs to the Olympic test network diff --git a/js/src/i18n/zh/settings.js b/js/src/i18n/zh/settings.js index 4081a06d6..752d124a1 100644 --- a/js/src/i18n/zh/settings.js +++ b/js/src/i18n/zh/settings.js @@ -30,6 +30,7 @@ export default { chain_classic: `将Parity同步至以太坊经典网络`, // Parity syncs to the Ethereum Classic network chain_dev: `将Parity使用一条本地开发用区块链`, // Parity uses a local development chain chain_expanse: `将Parity同步至Expanse网络`, // Parity syncs to the Expanse network + chain_musicoin: `将Parity同步至Musicoin网络`, // Parity syncs to the Musicoin network chain_foundation: `将Parity同步至以太坊基金会发起的以太坊网络`, // Parity syncs to the Ethereum network launched by the Ethereum Foundation chain_kovan: `将Parity同步至Kovan测试网络`, // Parity syncs to the Kovan test network chain_olympic: `将Parity同步至Olympic测试网络`, // Parity syncs to the Olympic test network diff --git 
a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index 997b7d434..2dde67b6f 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -1756,7 +1756,7 @@ export default { params: [ { type: String, - desc: 'Chain spec name, one of: "foundation", "ropsten", "morden", "kovan", "olympic", "classic", "dev", "expanse" or a filename.', + desc: 'Chain spec name, one of: "foundation", "ropsten", "morden", "kovan", "olympic", "classic", "dev", "expanse", "musicoin" or a filename.', example: 'foundation' } ], diff --git a/js/src/ui/CurrencySymbol/currencySymbol.example.js b/js/src/ui/CurrencySymbol/currencySymbol.example.js index c1b56ed5c..1f33dbba8 100644 --- a/js/src/ui/CurrencySymbol/currencySymbol.example.js +++ b/js/src/ui/CurrencySymbol/currencySymbol.example.js @@ -45,6 +45,12 @@ export default class CurrencySymbolExample extends Component { netChain='expanse' /> + + + + ); } diff --git a/js/src/ui/CurrencySymbol/currencySymbol.js b/js/src/ui/CurrencySymbol/currencySymbol.js index 3322b0301..ec7f98458 100644 --- a/js/src/ui/CurrencySymbol/currencySymbol.js +++ b/js/src/ui/CurrencySymbol/currencySymbol.js @@ -20,6 +20,7 @@ import { connect } from 'react-redux'; const SYMBOL_ETC = 'ETC'; const SYMBOL_ETH = 'ETH'; const SYMBOL_EXP = 'EXP'; +const SYMBOL_MUSIC = 'MUSIC'; export class CurrencySymbol extends Component { static propTypes = { @@ -45,6 +46,9 @@ export class CurrencySymbol extends Component { case 'expanse': return SYMBOL_EXP; + case 'musicoin': + return SYMBOL_MUSIC; + default: return SYMBOL_ETH; } diff --git a/js/src/ui/CurrencySymbol/currencySymbol.spec.js b/js/src/ui/CurrencySymbol/currencySymbol.spec.js index e705b5edc..f10557bd2 100644 --- a/js/src/ui/CurrencySymbol/currencySymbol.spec.js +++ b/js/src/ui/CurrencySymbol/currencySymbol.spec.js @@ -74,6 +74,10 @@ describe('ui/CurrencySymbol', () => { expect(render('expanse').text()).equal('EXP'); }); + it('renders MUSIC for musicoin', () => { + 
expect(render('musicoin').text()).equal('MUSIC'); + }); + it('renders ETH as default', () => { expect(render('somethingElse').text()).equal('ETH'); }); @@ -95,5 +99,9 @@ describe('ui/CurrencySymbol', () => { it('render EXP', () => { expect(render('expanse').instance().renderSymbol()).equal('EXP'); }); + + it('render MUSIC', () => { + expect(render('musicoin').instance().renderSymbol()).equal('MUSIC'); + }); }); }); diff --git a/js/src/views/Settings/Parity/parity.js b/js/src/views/Settings/Parity/parity.js index 9cf2c4f86..853ba3aa1 100644 --- a/js/src/views/Settings/Parity/parity.js +++ b/js/src/views/Settings/Parity/parity.js @@ -263,6 +263,14 @@ export default class Parity extends Component { /> )) } + { + this.renderItem('musicoin', ( + + )) + } { this.renderItem('dev', ( , + + /// See main EthashParams docs. + #[serde(rename="mcip3Transition")] + pub mcip3_transition: Option, + /// See main EthashParams docs. + #[serde(rename="mcip3MinerReward")] + pub mcip3_miner_reward: Option, + /// See main EthashParams docs. + #[serde(rename="mcip3UbiReward")] + pub mcip3_ubi_reward: Option, + /// See main EthashParams docs. + #[serde(rename="mcip3UbiContract")] + pub mcip3_ubi_contract: Option
, + /// See main EthashParams docs. + #[serde(rename="mcip3DevReward")] + pub mcip3_dev_reward: Option, + /// See main EthashParams docs. + #[serde(rename="mcip3DevContract")] + pub mcip3_dev_contract: Option
, + /// EIP-649 transition block. #[serde(rename="eip649Transition")] pub eip649_transition: Option, @@ -212,6 +232,12 @@ mod tests { ecip1010_pause_transition: None, ecip1010_continue_transition: None, ecip1017_era_rounds: None, + mcip3_transition: None, + mcip3_miner_reward: None, + mcip3_ubi_reward: None, + mcip3_ubi_contract: None, + mcip3_dev_reward: None, + mcip3_dev_contract: None, eip649_transition: None, eip649_delay: None, eip649_reward: None, @@ -252,6 +278,12 @@ mod tests { ecip1010_pause_transition: None, ecip1010_continue_transition: None, ecip1017_era_rounds: None, + mcip3_transition: None, + mcip3_miner_reward: None, + mcip3_ubi_reward: None, + mcip3_ubi_contract: None, + mcip3_dev_reward: None, + mcip3_dev_contract: None, eip649_transition: None, eip649_delay: None, eip649_reward: None, diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 7e14eab6d..dc6c2fe7b 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -301,7 +301,7 @@ usage! { ARG arg_chain: (String) = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), "--chain=[CHAIN]", - "Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, ropsten, classic, expanse, testnet, kovan or dev.", + "Specify the blockchain type. 
CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, ropsten, classic, expanse, musicoin, testnet, kovan or dev.", ARG arg_keys_path: (String) = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), "--keys-path=[PATH]", diff --git a/parity/params.rs b/parity/params.rs index 2c0e534ba..f508b59f1 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -35,6 +35,7 @@ pub enum SpecType { Olympic, Classic, Expanse, + Musicoin, Dev, Custom(String), } @@ -57,6 +58,7 @@ impl str::FromStr for SpecType { "kovan" | "testnet" => SpecType::Kovan, "olympic" => SpecType::Olympic, "expanse" => SpecType::Expanse, + "musicoin" => SpecType::Musicoin, "dev" => SpecType::Dev, other => SpecType::Custom(other.into()), }; @@ -73,6 +75,7 @@ impl fmt::Display for SpecType { SpecType::Olympic => "olympic", SpecType::Classic => "classic", SpecType::Expanse => "expanse", + SpecType::Musicoin => "musicoin", SpecType::Kovan => "kovan", SpecType::Dev => "dev", SpecType::Custom(ref custom) => custom, @@ -90,6 +93,7 @@ impl SpecType { SpecType::Olympic => Ok(ethereum::new_olympic(params)), SpecType::Classic => Ok(ethereum::new_classic(params)), SpecType::Expanse => Ok(ethereum::new_expanse(params)), + SpecType::Musicoin => Ok(ethereum::new_musicoin(params)), SpecType::Kovan => Ok(ethereum::new_kovan(params)), SpecType::Dev => Ok(Spec::new_instant()), SpecType::Custom(ref filename) => { @@ -103,6 +107,7 @@ impl SpecType { match *self { SpecType::Classic => Some("classic".to_owned()), SpecType::Expanse => Some("expanse".to_owned()), + SpecType::Musicoin => Some("musicoin".to_owned()), _ => None, } } @@ -353,6 +358,7 @@ mod tests { assert_eq!(format!("{}", SpecType::Olympic), "olympic"); assert_eq!(format!("{}", SpecType::Classic), "classic"); assert_eq!(format!("{}", SpecType::Expanse), "expanse"); + assert_eq!(format!("{}", SpecType::Musicoin), "musicoin"); assert_eq!(format!("{}", SpecType::Kovan), "kovan"); assert_eq!(format!("{}", 
SpecType::Dev), "dev"); assert_eq!(format!("{}", SpecType::Custom("foo/bar".into())), "foo/bar"); diff --git a/rpc/src/v1/traits/parity_set.rs b/rpc/src/v1/traits/parity_set.rs index cd964daa3..7b3c593dc 100644 --- a/rpc/src/v1/traits/parity_set.rs +++ b/rpc/src/v1/traits/parity_set.rs @@ -87,7 +87,7 @@ build_rpc_trait! { #[rpc(name = "parity_setMode")] fn set_mode(&self, String) -> Result; - /// Set the network spec. Argument must be one of: "foundation", "ropsten", "morden", "kovan", "olympic", "classic", "dev", "expanse" or a filename. + /// Set the network spec. Argument must be one of: "foundation", "ropsten", "morden", "kovan", "olympic", "classic", "dev", "expanse", "musicoin" or a filename. #[rpc(name = "parity_setChain")] fn set_spec_name(&self, String) -> Result; From b010fb5004977d9370e582f3ccebfc7fe4495c72 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 8 Oct 2017 18:19:27 +0200 Subject: [PATCH 21/22] Light Client: fetch transactions/receipts by transaction hash (#6641) * rpc: transaction/receipt requests made async * rpc: light client fetches transaction and uncle by hash/index * on_demand: request type for transaction index * serve transaction index requests in light protocol * add a test for transaction index serving * fetch transaction and receipts by hash on light client * fix decoding tests * light: more lenient cost table parsing (backwards compatible) * fix tests and warnings * LES -> PIP * Update provider.rs * proper doc comments for public functions --- ethcore/light/src/net/mod.rs | 10 +- ethcore/light/src/net/request_credits.rs | 136 ++++++++++++++--------- ethcore/light/src/net/tests/mod.rs | 69 ++++++++++-- ethcore/light/src/on_demand/mod.rs | 1 + ethcore/light/src/on_demand/request.rs | 46 ++++++++ ethcore/light/src/provider.rs | 24 +++- rpc/src/v1/helpers/light_fetch.rs | 119 ++++++++++++++++++-- rpc/src/v1/impls/eth.rs | 42 ++++--- rpc/src/v1/impls/light/eth.rs | 103 ++++++++++++++--- rpc/src/v1/tests/mocked/eth.rs | 4 +- 
rpc/src/v1/tests/mocked/parity_set.rs | 2 +- rpc/src/v1/tests/mocked/signer.rs | 2 +- rpc/src/v1/tests/mocked/signing.rs | 2 +- rpc/src/v1/traits/eth.rs | 14 +-- rpc/src/v1/types/transaction.rs | 2 +- 15 files changed, 462 insertions(+), 114 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index bd9e3e1fd..ee14ad976 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -430,7 +430,11 @@ impl LightProtocol { // compute and deduct cost. let pre_creds = creds.current(); - let cost = params.compute_cost_multi(requests.requests()); + let cost = match params.compute_cost_multi(requests.requests()) { + Some(cost) => cost, + None => return Err(Error::NotServer), + }; + creds.deduct_cost(cost)?; trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}", @@ -924,7 +928,7 @@ impl LightProtocol { peer.local_credits.deduct_cost(peer.local_flow.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; - let cost = peer.local_flow.compute_cost(&request); + let cost = peer.local_flow.compute_cost(&request).ok_or(Error::NotServer)?; peer.local_credits.deduct_cost(cost)?; request_builder.push(request).map_err(|_| Error::BadBackReference)?; } @@ -939,7 +943,7 @@ impl LightProtocol { match complete_req { CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), - CompleteRequest::TransactionIndex(_) => None, // don't answer these yet, but leave them in protocol. 
+ CompleteRequest::TransactionIndex(req) => self.provider.transaction_index(req).map(Response::TransactionIndex), CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts), CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account), diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 8c2e89eec..5007d4d6c 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -79,19 +79,42 @@ impl Credits { } /// A cost table, mapping requests to base and per-request costs. +/// Costs themselves may be missing. #[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { base: U256, // cost per packet. - headers: U256, // cost per header - transaction_index: U256, - body: U256, - receipts: U256, - account: U256, - storage: U256, - code: U256, - header_proof: U256, - transaction_proof: U256, // cost per gas. - epoch_signal: U256, + headers: Option, // cost per header + transaction_index: Option, + body: Option, + receipts: Option, + account: Option, + storage: Option, + code: Option, + header_proof: Option, + transaction_proof: Option, // cost per gas. + epoch_signal: Option, +} + +impl CostTable { + fn costs_set(&self) -> usize { + let mut num_set = 0; + + { + let mut incr_if_set = |cost: &Option<_>| if cost.is_some() { num_set += 1 }; + incr_if_set(&self.headers); + incr_if_set(&self.transaction_index); + incr_if_set(&self.body); + incr_if_set(&self.receipts); + incr_if_set(&self.account); + incr_if_set(&self.storage); + incr_if_set(&self.code); + incr_if_set(&self.header_proof); + incr_if_set(&self.transaction_proof); + incr_if_set(&self.epoch_signal); + } + + num_set + } } impl Default for CostTable { @@ -99,31 +122,32 @@ impl Default for CostTable { // arbitrarily chosen constants. 
CostTable { base: 100000.into(), - headers: 10000.into(), - transaction_index: 10000.into(), - body: 15000.into(), - receipts: 5000.into(), - account: 25000.into(), - storage: 25000.into(), - code: 20000.into(), - header_proof: 15000.into(), - transaction_proof: 2.into(), - epoch_signal: 10000.into(), + headers: Some(10000.into()), + transaction_index: Some(10000.into()), + body: Some(15000.into()), + receipts: Some(5000.into()), + account: Some(25000.into()), + storage: Some(25000.into()), + code: Some(20000.into()), + header_proof: Some(15000.into()), + transaction_proof: Some(2.into()), + epoch_signal: Some(10000.into()), } } } impl Encodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { - s.begin_list(2); - - // hack around https://github.com/paritytech/parity/issues/4356 - Encodable::rlp_append(&kind, s); - s.append(cost); + fn append_cost(s: &mut RlpStream, cost: &Option, kind: request::Kind) { + if let Some(ref cost) = *cost { + s.begin_list(2); + // hack around https://github.com/paritytech/parity/issues/4356 + Encodable::rlp_append(&kind, s); + s.append(cost); + } } - s.begin_list(11).append(&self.base); + s.begin_list(1 + self.costs_set()).append(&self.base); append_cost(s, &self.headers, request::Kind::Headers); append_cost(s, &self.transaction_index, request::Kind::TransactionIndex); append_cost(s, &self.body, request::Kind::Body); @@ -168,21 +192,25 @@ impl Decodable for CostTable { } } - let unwrap_cost = |cost: Option| cost.ok_or(DecoderError::Custom("Not all costs specified in cost table.")); - - Ok(CostTable { + let table = CostTable { base: base, - headers: unwrap_cost(headers)?, - transaction_index: unwrap_cost(transaction_index)?, - body: unwrap_cost(body)?, - receipts: unwrap_cost(receipts)?, - account: unwrap_cost(account)?, - storage: unwrap_cost(storage)?, - code: unwrap_cost(code)?, - header_proof: unwrap_cost(header_proof)?, - transaction_proof: 
unwrap_cost(transaction_proof)?, - epoch_signal: unwrap_cost(epoch_signal)?, - }) + headers: headers, + transaction_index: transaction_index, + body: body, + receipts: receipts, + account: account, + storage: storage, + code: code, + header_proof: header_proof, + transaction_proof: transaction_proof, + epoch_signal: epoch_signal, + }; + + if table.costs_set() == 0 { + Err(DecoderError::Custom("no cost types set.")) + } else { + Ok(table) + } } } @@ -230,7 +258,7 @@ impl FlowParams { let serve_per_second = serve_per_second.max(1.0 / 10_000.0); // as a percentage of the recharge per second. - U256::from((recharge as f64 / serve_per_second) as u64) + Some(U256::from((recharge as f64 / serve_per_second) as u64)) }; let costs = CostTable { @@ -256,12 +284,12 @@ impl FlowParams { /// Create effectively infinite flow params. pub fn free() -> Self { - let free_cost: U256 = 0.into(); + let free_cost: Option = Some(0.into()); FlowParams { limit: (!0u64).into(), recharge: 1.into(), costs: CostTable { - base: free_cost.clone(), + base: 0.into(), headers: free_cost.clone(), transaction_index: free_cost.clone(), body: free_cost.clone(), @@ -290,9 +318,9 @@ impl FlowParams { /// Compute the actual cost of a request, given the kind of request /// and number of requests made. 
- pub fn compute_cost(&self, request: &Request) -> U256 { + pub fn compute_cost(&self, request: &Request) -> Option { match *request { - Request::Headers(ref req) => self.costs.headers * req.max.into(), + Request::Headers(ref req) => self.costs.headers.map(|c| c * req.max.into()), Request::HeaderProof(_) => self.costs.header_proof, Request::TransactionIndex(_) => self.costs.transaction_index, Request::Body(_) => self.costs.body, @@ -300,15 +328,23 @@ impl FlowParams { Request::Account(_) => self.costs.account, Request::Storage(_) => self.costs.storage, Request::Code(_) => self.costs.code, - Request::Execution(ref req) => self.costs.transaction_proof * req.gas, + Request::Execution(ref req) => self.costs.transaction_proof.map(|c| c * req.gas), Request::Signal(_) => self.costs.epoch_signal, } } /// Compute the cost of a set of requests. /// This is the base cost plus the cost of each individual request. - pub fn compute_cost_multi(&self, requests: &[Request]) -> U256 { - requests.iter().fold(self.costs.base, |cost, req| cost + self.compute_cost(req)) + pub fn compute_cost_multi(&self, requests: &[Request]) -> Option { + let mut cost = self.costs.base; + for request in requests { + match self.compute_cost(request) { + Some(c) => cost = cost + c, + None => return None, + } + } + + Some(cost) } /// Create initial credits. 
@@ -408,6 +444,6 @@ mod tests { ); assert_eq!(flow_params2.costs, flow_params3.costs); - assert_eq!(flow_params.costs.headers, flow_params2.costs.headers * 2.into()); + assert_eq!(flow_params.costs.headers.unwrap(), flow_params2.costs.headers.unwrap() * 2.into()); } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index af0a02c3e..0e344d803 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -116,6 +116,16 @@ impl Provider for TestProvider { self.0.client.block_header(id) } + fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) + -> Option + { + Some(request::TransactionIndexResponse { + num: 100, + hash: req.hash, + index: 55, + }) + } + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { self.0.client.block_body(req) } @@ -308,7 +318,7 @@ fn get_block_headers() { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); let response = vec![Response::Headers(HeadersResponse { headers: headers, @@ -361,7 +371,7 @@ fn get_block_bodies() { let request_body = make_packet(req_id, &requests); let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).append_list(&bodies); @@ -416,7 +426,7 @@ fn get_block_receipts() { let response = { assert_eq!(receipts.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let new_creds = *flow_params.limit() - 
flow_params.compute_cost_multi(requests.requests()).unwrap(); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).append_list(&receipts); @@ -475,7 +485,7 @@ fn get_state_proofs() { }).unwrap()), ]; - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).append_list(&responses); @@ -517,7 +527,7 @@ fn get_contract_code() { code: key1.iter().chain(key2.iter()).cloned().collect(), })]; - let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); let mut response_stream = RlpStream::new_list(3); @@ -558,9 +568,8 @@ fn epoch_signal() { })]; let limit = *flow_params.limit(); - let cost = flow_params.compute_cost_multi(requests.requests()); + let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); - println!("limit = {}, cost = {}", limit, cost); let new_creds = limit - cost; let mut response_stream = RlpStream::new_list(3); @@ -605,9 +614,8 @@ fn proof_of_execution() { let response = { let limit = *flow_params.limit(); - let cost = flow_params.compute_cost_multi(requests.requests()); + let cost = flow_params.compute_cost_multi(requests.requests()).unwrap(); - println!("limit = {}, cost = {}", limit, cost); let new_creds = limit - cost; let mut response_stream = RlpStream::new_list(3); @@ -713,3 +721,46 @@ fn id_guard() { assert_eq!(peer_info.failed_requests, &[req_id_1]); } } + +#[test] +fn get_transaction_index() { + let capabilities = capabilities(); + + let (provider, proto) = setup(capabilities.clone()); + let flow_params = proto.flow_params.read().clone(); + + let cur_status = status(provider.client.chain_info()); + + { + let 
packet_body = write_handshake(&cur_status, &capabilities, &proto); + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); + } + + let req_id = 112; + let key1: H256 = U256::from(11223344).into(); + + let request = Request::TransactionIndex(IncompleteTransactionIndexRequest { + hash: key1.into(), + }); + + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { + let response = vec![Response::TransactionIndex(TransactionIndexResponse { + num: 100, + hash: key1, + index: 55, + })]; + + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap(); + + let mut response_stream = RlpStream::new_list(3); + + response_stream.append(&req_id).append(&new_creds).append_list(&response); + response_stream.out() + }; + + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); +} diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 40da12348..c7d2a01e1 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -195,6 +195,7 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities { caps.serve_headers = true, CheckedRequest::HeaderByHash(_, _) => caps.serve_headers = true, + CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info. CheckedRequest::Signal(_, _) => caps.serve_headers = true, CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() { diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 4b0da5677..2c0dd4121 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -48,6 +48,8 @@ pub enum Request { HeaderProof(HeaderProof), /// A request for a header by hash. 
HeaderByHash(HeaderByHash), + /// A request for the index of a transaction. + TransactionIndex(TransactionIndex), /// A request for block receipts. Receipts(BlockReceipts), /// A request for a block body. @@ -135,6 +137,7 @@ macro_rules! impl_single { // implement traits for each kind of request. impl_single!(HeaderProof, HeaderProof, (H256, U256)); impl_single!(HeaderByHash, HeaderByHash, encoded::Header); +impl_single!(TransactionIndex, TransactionIndex, net_request::TransactionIndexResponse); impl_single!(Receipts, BlockReceipts, Vec); impl_single!(Body, Body, encoded::Block); impl_single!(Account, Account, Option); @@ -244,6 +247,7 @@ impl From for HeaderRef { pub enum CheckedRequest { HeaderProof(HeaderProof, net_request::IncompleteHeaderProofRequest), HeaderByHash(HeaderByHash, net_request::IncompleteHeadersRequest), + TransactionIndex(TransactionIndex, net_request::IncompleteTransactionIndexRequest), Receipts(BlockReceipts, net_request::IncompleteReceiptsRequest), Body(Body, net_request::IncompleteBodyRequest), Account(Account, net_request::IncompleteAccountRequest), @@ -270,6 +274,12 @@ impl From for CheckedRequest { }; CheckedRequest::HeaderProof(req, net_req) } + Request::TransactionIndex(req) => { + let net_req = net_request::IncompleteTransactionIndexRequest { + hash: req.0.clone(), + }; + CheckedRequest::TransactionIndex(req, net_req) + } Request::Body(req) => { let net_req = net_request::IncompleteBodyRequest { hash: req.0.field(), @@ -326,6 +336,7 @@ impl CheckedRequest { match self { CheckedRequest::HeaderProof(_, req) => NetRequest::HeaderProof(req), CheckedRequest::HeaderByHash(_, req) => NetRequest::Headers(req), + CheckedRequest::TransactionIndex(_, req) => NetRequest::TransactionIndex(req), CheckedRequest::Receipts(_, req) => NetRequest::Receipts(req), CheckedRequest::Body(_, req) => NetRequest::Body(req), CheckedRequest::Account(_, req) => NetRequest::Account(req), @@ -454,6 +465,7 @@ macro_rules! 
match_me { match $me { CheckedRequest::HeaderProof($check, $req) => $e, CheckedRequest::HeaderByHash($check, $req) => $e, + CheckedRequest::TransactionIndex($check, $req) => $e, CheckedRequest::Receipts($check, $req) => $e, CheckedRequest::Body($check, $req) => $e, CheckedRequest::Account($check, $req) => $e, @@ -482,6 +494,7 @@ impl IncompleteRequest for CheckedRequest { _ => Ok(()), } } + CheckedRequest::TransactionIndex(_, ref req) => req.check_outputs(f), CheckedRequest::Receipts(_, ref req) => req.check_outputs(f), CheckedRequest::Body(_, ref req) => req.check_outputs(f), CheckedRequest::Account(_, ref req) => req.check_outputs(f), @@ -503,6 +516,7 @@ impl IncompleteRequest for CheckedRequest { match self { CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof), CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers), + CheckedRequest::TransactionIndex(_, req) => req.complete().map(CompleteRequest::TransactionIndex), CheckedRequest::Receipts(_, req) => req.complete().map(CompleteRequest::Receipts), CheckedRequest::Body(_, req) => req.complete().map(CompleteRequest::Body), CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account), @@ -545,6 +559,9 @@ impl net_request::CheckedRequest for CheckedRequest { CheckedRequest::HeaderByHash(ref prover, _) => expect!((&NetResponse::Headers(ref res), &CompleteRequest::Headers(ref req)) => prover.check_response(cache, &req.start, &res.headers).map(Response::HeaderByHash)), + CheckedRequest::TransactionIndex(ref prover, _) => + expect!((&NetResponse::TransactionIndex(ref res), _) => + prover.check_response(cache, res).map(Response::TransactionIndex)), CheckedRequest::Receipts(ref prover, _) => expect!((&NetResponse::Receipts(ref res), _) => prover.check_response(cache, &res.receipts).map(Response::Receipts)), @@ -575,6 +592,8 @@ pub enum Response { HeaderProof((H256, U256)), /// Response to a header-by-hash request. 
HeaderByHash(encoded::Header), + /// Response to a transaction-index request. + TransactionIndex(net_request::TransactionIndexResponse), /// Response to a receipts request. Receipts(Vec), /// Response to a block body request. @@ -723,6 +742,33 @@ impl HeaderByHash { } } +/// Request for a transaction index. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TransactionIndex(pub Field); + +impl TransactionIndex { + /// Check a response for the transaction index. + // + // TODO: proper checking involves looking at canonicality of the + // hash w.r.t. the current best block header. + // + // unlike all other forms of request, we don't know the header to check + // until we make this request. + // + // This would require lookups in the database or perhaps CHT requests, + // which aren't currently possible. + // + // Also, returning a result that is not locally canonical doesn't necessarily + // indicate misbehavior, so the punishment scheme would need to be revised. + pub fn check_response( + &self, + _cache: &Mutex<::cache::Cache>, + res: &net_request::TransactionIndexResponse, + ) -> Result { + Ok(res.clone()) + } +} + /// Request for a block, with header for verification. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Body(pub HeaderRef); diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index d71a5fff0..3e2ddcb9d 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! A provider for the LES protocol. This is typically a full node, who can +//! A provider for the PIP protocol. This is typically a full node, who can //! give as much data as necessary to its peers. use std::sync::Arc; @@ -102,6 +102,10 @@ pub trait Provider: Send + Sync { /// Get a block header by id. fn block_header(&self, id: BlockId) -> Option; + /// Get a transaction index by hash. 
+ fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) + -> Option; + /// Fulfill a block body request. fn block_body(&self, req: request::CompleteBodyRequest) -> Option; @@ -150,6 +154,18 @@ impl Provider for T { BlockChainClient::block_header(self, id) } + fn transaction_index(&self, req: request::CompleteTransactionIndexRequest) + -> Option + { + use ethcore::ids::TransactionId; + + self.transaction_receipt(TransactionId::Hash(req.hash)).map(|receipt| request::TransactionIndexResponse { + num: receipt.block_number, + hash: receipt.block_hash, + index: receipt.transaction_index as u64, + }) + } + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { BlockChainClient::block_body(self, BlockId::Hash(req.hash)) .map(|body| ::request::BodyResponse { body: body }) @@ -311,6 +327,12 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } + fn transaction_index(&self, _req: request::CompleteTransactionIndexRequest) + -> Option + { + None + } + fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { None } diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index bb030b46a..ac5902b51 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -23,7 +23,8 @@ use ethcore::encoded; use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; use ethcore::filter::Filter as EthcoreFilter; -use ethcore::transaction::{Action, Transaction as EthTransaction}; +use ethcore::transaction::{Action, Transaction as EthTransaction, SignedTransaction}; +use ethcore::receipt::Receipt; use jsonrpc_core::{BoxFuture, Error}; use jsonrpc_core::futures::{future, Future}; @@ -38,14 +39,18 @@ use light::request::Field; use ethsync::LightSync; use bigint::prelude::U256; +use hash::H256; use util::Address; use parking_lot::Mutex; use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; -use v1::types::{BlockNumber, CallRequest, 
Log}; +use v1::types::{BlockNumber, CallRequest, Log, Transaction}; + +const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; /// Helper for fetching blockchain data either from the light client or the network /// as necessary. +#[derive(Clone)] pub struct LightFetch { /// The light client. pub client: Arc, @@ -57,6 +62,19 @@ pub struct LightFetch { pub cache: Arc>, } +/// Extract a transaction at given index. +pub fn extract_transaction_at_index(block: encoded::Block, index: usize, eip86_transition: u64) -> Option { + block.transactions().into_iter().nth(index) + .and_then(|tx| SignedTransaction::new(tx).ok()) + .map(|tx| Transaction::from_signed(tx, block.number(), eip86_transition)) + .map(|mut tx| { + tx.block_hash = Some(block.hash().into()); + tx.transaction_index = Some(index.into()); + tx + }) +} + + /// Type alias for convenience. pub type ExecutionResult = Result; @@ -131,7 +149,7 @@ impl LightFetch { } } - /// helper for getting account info at a given block. + /// Helper for getting account info at a given block. /// `None` indicates the account doesn't exist at the given block. pub fn account(&self, address: Address, id: BlockId) -> BoxFuture, Error> { let mut reqs = Vec::new(); @@ -158,7 +176,7 @@ impl LightFetch { } } - /// helper for getting proved execution. + /// Helper for getting proved execution. pub fn proved_execution(&self, req: CallRequest, num: Trailing) -> BoxFuture { const DEFAULT_GAS_PRICE: u64 = 21_000; // starting gas when gas not provided. @@ -235,7 +253,7 @@ impl LightFetch { })) } - /// get a block itself. fails on unknown block ID. + /// Get a block itself. Fails on unknown block ID. 
pub fn block(&self, id: BlockId) -> BoxFuture { let mut reqs = Vec::new(); let header_ref = match self.make_header_requests(id, &mut reqs) { @@ -247,7 +265,7 @@ impl LightFetch { let maybe_future = self.sync.with_context(move |ctx| { Box::new(self.on_demand.request_raw(ctx, reqs) - .expect("all back-references known to be valid; qed") + .expect(NO_INVALID_BACK_REFS) .map(|mut res| match res.pop() { Some(OnDemandResponse::Body(b)) => b, _ => panic!("responses correspond directly with requests in amount and type; qed"), @@ -261,13 +279,37 @@ impl LightFetch { } } - /// get transaction logs + /// Get the block receipts. Fails on unknown block ID. + pub fn receipts(&self, id: BlockId) -> BoxFuture, Error> { + let mut reqs = Vec::new(); + let header_ref = match self.make_header_requests(id, &mut reqs) { + Ok(r) => r, + Err(e) => return Box::new(future::err(e)), + }; + + reqs.push(request::BlockReceipts(header_ref).into()); + + let maybe_future = self.sync.with_context(move |ctx| { + Box::new(self.on_demand.request_raw(ctx, reqs) + .expect(NO_INVALID_BACK_REFS) + .map(|mut res| match res.pop() { + Some(OnDemandResponse::Receipts(b)) => b, + _ => panic!("responses correspond directly with requests in amount and type; qed"), + }) + .map_err(errors::on_demand_cancel)) + }); + + match maybe_future { + Some(recv) => recv, + None => Box::new(future::err(errors::network_disabled())) + } + } + + /// Get transaction logs pub fn logs(&self, filter: EthcoreFilter) -> BoxFuture, Error> { use std::collections::BTreeMap; use jsonrpc_core::futures::stream::{self, Stream}; - const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; - // early exit for "to" block before "from" block. let best_number = self.client.chain_info().best_block_number; let block_number = |id| match id { @@ -318,6 +360,65 @@ impl LightFetch { None => Box::new(future::err(errors::network_disabled())), } } + + // Get a transaction by hash. 
also returns the index in the block. + // Only returns transactions in the canonical chain. + pub fn transaction_by_hash(&self, tx_hash: H256, eip86_transition: u64) + -> BoxFuture, Error> + { + let params = (self.sync.clone(), self.on_demand.clone()); + let fetcher: Self = self.clone(); + + Box::new(future::loop_fn(params, move |(sync, on_demand)| { + let maybe_future = sync.with_context(|ctx| { + let req = request::TransactionIndex(tx_hash.clone().into()); + on_demand.request(ctx, req) + }); + + let eventual_index = match maybe_future { + Some(e) => e.expect(NO_INVALID_BACK_REFS).map_err(errors::on_demand_cancel), + None => return Either::A(future::err(errors::network_disabled())), + }; + + let fetcher = fetcher.clone(); + let extract_transaction = eventual_index.and_then(move |index| { + // check that the block is known by number. + // that ensures that it is within the chain that we are aware of. + fetcher.block(BlockId::Number(index.num)).then(move |blk| match blk { + Ok(blk) => { + // if the block is known by number, make sure the + // index from earlier isn't garbage. + + if blk.hash() != index.hash { + // index is on a different chain from us. + return Ok(future::Loop::Continue((sync, on_demand))) + } + + let index = index.index as usize; + let transaction = extract_transaction_at_index(blk, index, eip86_transition); + + if transaction.as_ref().map_or(true, |tx| tx.hash != tx_hash.into()) { + // index is actively wrong: indicated block has + // fewer transactions than necessary or the transaction + // at that index had a different hash. + // TODO: punish peer/move into OnDemand somehow? + Ok(future::Loop::Continue((sync, on_demand))) + } else { + let transaction = transaction.map(move |tx| (tx, index)); + Ok(future::Loop::Break(transaction)) + } + } + Err(ref e) if e == &errors::unknown_block() => { + // block by number not in the canonical chain. 
+ Ok(future::Loop::Break(None)) + } + Err(e) => Err(e), + }) + }); + + Either::B(extract_transaction) + })) + } } #[derive(Clone)] diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 3afdf2d74..7c16c5a8a 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -458,38 +458,54 @@ impl Eth for EthClient where Box::new(future::done(self.block(num.into(), include_txs))) } - fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { + fn transaction_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { let hash: H256 = hash.into(); let block_number = self.client.chain_info().best_block_number; - Ok(self.transaction(TransactionId::Hash(hash))?.or_else(|| self.miner.transaction(block_number, &hash).map(|t| Transaction::from_pending(t, block_number, self.eip86_transition)))) + let tx = try_bf!(self.transaction(TransactionId::Hash(hash))).or_else(|| { + self.miner.transaction(block_number, &hash) + .map(|t| Transaction::from_pending(t, block_number, self.eip86_transition)) + }); + + Box::new(future::ok(tx)) } - fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { - self.transaction(TransactionId::Location(BlockId::Hash(hash.into()), index.value())) + fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> BoxFuture, Error> { + Box::new(future::done( + self.transaction(TransactionId::Location(BlockId::Hash(hash.into()), index.value())) + )) } - fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { - self.transaction(TransactionId::Location(num.into(), index.value())) + fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture, Error> { + Box::new(future::done( + self.transaction(TransactionId::Location(num.into(), index.value())) + )) } - fn transaction_receipt(&self, hash: RpcH256) -> Result, Error> { + fn transaction_receipt(&self, hash: RpcH256) -> BoxFuture, Error> { let best_block = 
self.client.chain_info().best_block_number; let hash: H256 = hash.into(); + match (self.miner.pending_receipt(best_block, &hash), self.options.allow_pending_receipt_query) { - (Some(receipt), true) => Ok(Some(receipt.into())), + (Some(receipt), true) => Box::new(future::ok(Some(receipt.into()))), _ => { let receipt = self.client.transaction_receipt(TransactionId::Hash(hash)); - Ok(receipt.map(Into::into)) + Box::new(future::ok(receipt.map(Into::into))) } } } - fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { - self.uncle(UncleId { block: BlockId::Hash(hash.into()), position: index.value() }) + fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> BoxFuture, Error> { + Box::new(future::done(self.uncle(UncleId { + block: BlockId::Hash(hash.into()), + position: index.value() + }))) } - fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { - self.uncle(UncleId { block: num.into(), position: index.value() }) + fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture, Error> { + Box::new(future::done(self.uncle(UncleId { + block: num.into(), + position: index.value() + }))) } fn compilers(&self) -> Result, Error> { diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 0f7438eb0..b797e76c2 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -39,11 +39,10 @@ use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; use bigint::prelude::U256; use parking_lot::{RwLock, Mutex}; - use v1::impls::eth_filter::Filterable; use v1::helpers::{errors, limit_logs}; use v1::helpers::{PollFilter, PollManager}; -use v1::helpers::light_fetch::LightFetch; +use v1::helpers::light_fetch::{self, LightFetch}; use v1::traits::Eth; use v1::types::{ RichBlock, Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, @@ -80,7 +79,6 @@ impl Clone for EthClient { } } - impl EthClient { /// Create a new 
`EthClient` with a handle to the light sync instance, client, /// and on-demand request service, which is assumed to be attached as a handler. @@ -393,33 +391,72 @@ impl Eth for EthClient { })) } - fn transaction_by_hash(&self, _hash: RpcH256) -> Result, Error> { - Err(errors::unimplemented(None)) + fn transaction_by_hash(&self, hash: RpcH256) -> BoxFuture, Error> { + let eip86 = self.client.eip86_transition(); + Box::new(self.fetcher().transaction_by_hash(hash.into(), eip86).map(|x| x.map(|(tx, _)| tx))) } - fn transaction_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result, Error> { - Err(errors::unimplemented(None)) + fn transaction_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> BoxFuture, Error> { + let eip86 = self.client.eip86_transition(); + Box::new(self.fetcher().block(BlockId::Hash(hash.into())).map(move |block| { + light_fetch::extract_transaction_at_index(block, idx.value(), eip86) + })) } - fn transaction_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result, Error> { - Err(errors::unimplemented(None)) + fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture, Error> { + let eip86 = self.client.eip86_transition(); + Box::new(self.fetcher().block(num.into()).map(move |block| { + light_fetch::extract_transaction_at_index(block, idx.value(), eip86) + })) } - fn transaction_receipt(&self, _hash: RpcH256) -> Result, Error> { - Err(errors::unimplemented(None)) + fn transaction_receipt(&self, hash: RpcH256) -> BoxFuture, Error> { + let eip86 = self.client.eip86_transition(); + let fetcher = self.fetcher(); + Box::new(fetcher.transaction_by_hash(hash.clone().into(), eip86).and_then(move |tx| { + // the block hash included in the transaction object here has + // already been checked for canonicality and whether it contains + // the transaction. 
+ match tx { + Some((tx, index)) => match tx.block_hash.clone() { + Some(block_hash) => { + let extract_receipt = fetcher.receipts(BlockId::Hash(block_hash.clone().into())) + .and_then(move |mut receipts| future::ok(receipts.swap_remove(index))) + .map(Receipt::from) + .map(move |mut receipt| { + receipt.transaction_hash = Some(hash); + receipt.transaction_index = Some(index.into()); + receipt.block_hash = Some(block_hash); + receipt.block_number = tx.block_number; + receipt + }) + .map(Some); + + Either::B(extract_receipt) + } + None => Either::A(future::err(errors::unknown_block())), + }, + None => Either::A(future::ok(None)), + } + })) } - fn uncle_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result, Error> { - Err(errors::unimplemented(None)) + fn uncle_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> BoxFuture, Error> { + let client = self.client.clone(); + Box::new(self.fetcher().block(BlockId::Hash(hash.into())).map(move |block| { + extract_uncle_at_index(block, idx, client) + })) } - fn uncle_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result, Error> { - Err(errors::unimplemented(None)) + fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> BoxFuture, Error> { + let client = self.client.clone(); + Box::new(self.fetcher().block(num.into()).map(move |block| { + extract_uncle_at_index(block, idx, client) + })) } fn compilers(&self) -> Result, Error> { Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) - } fn compile_lll(&self, _: String) -> Result { @@ -478,3 +515,37 @@ impl Filterable for EthClient { &self.polls } } + +fn extract_uncle_at_index(block: encoded::Block, index: Index, client: Arc) -> Option { + let uncle = match block.uncles().into_iter().nth(index.value()) { + Some(u) => u, + None => return None, + }; + + let extra_info = client.engine().extra_info(&uncle); + Some(RichBlock { + inner: Block { + hash: Some(uncle.hash().into()), + size: None, + 
parent_hash: uncle.parent_hash().clone().into(), + uncles_hash: uncle.uncles_hash().clone().into(), + author: uncle.author().clone().into(), + miner: uncle.author().clone().into(), + state_root: uncle.state_root().clone().into(), + transactions_root: uncle.transactions_root().clone().into(), + number: Some(uncle.number().into()), + gas_used: uncle.gas_used().clone().into(), + gas_limit: uncle.gas_limit().clone().into(), + logs_bloom: uncle.log_bloom().clone().into(), + timestamp: uncle.timestamp().into(), + difficulty: uncle.difficulty().clone().into(), + total_difficulty: None, + receipts_root: uncle.receipts_root().clone().into(), + extra_data: uncle.extra_data().clone().into(), + seal_fields: uncle.seal().into_iter().cloned().map(Into::into).collect(), + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![]), + }, + extra_info: extra_info, + }) +} diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 8bd97108c..96329d18c 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -547,7 +547,7 @@ fn rpc_eth_pending_transaction_by_hash() { tester.miner.pending_transactions.lock().insert(H256::zero(), tx); } - let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","r":"0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","s":"0xefffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","standardV":"0x0","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"v":"0x1b","value":"0xa"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":"0x0","chainId":null,"condition":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","r":"0x48b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","s":"0xefffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","standardV":"0x0","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"v":"0x1b","value":"0xa"},"id":1}"#; let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionByHash", @@ -863,7 +863,7 @@ fn rpc_eth_sign_transaction() { let 
response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + r#""raw":"0x"# + &rlp.to_hex() + r#"","# + r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + + r#""blockHash":null,"blockNumber":"0x0","# + &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + r#""condition":null,"creates":null,"# + &format!("\"from\":\"0x{:?}\",", &address) + diff --git a/rpc/src/v1/tests/mocked/parity_set.rs b/rpc/src/v1/tests/mocked/parity_set.rs index ed27862ac..1653a1908 100644 --- a/rpc/src/v1/tests/mocked/parity_set.rs +++ b/rpc/src/v1/tests/mocked/parity_set.rs @@ -234,7 +234,7 @@ fn rpc_parity_remove_transaction() { let hash = signed.hash(); let request = r#"{"jsonrpc": "2.0", "method": "parity_removeTransaction", "params":[""#.to_owned() + &format!("0x{:?}", hash) + r#""], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"chainId":null,"condition":null,"creates":null,"from":"0x0000000000000000000000000000000000000002","gas":"0x76c0","gasPrice":"0x9184e72a000","hash":"0xa2e0da8a8064e0b9f93e95a53c2db6d01280efb8ac72a708d25487e67dd0f8fc","input":"0x","nonce":"0x1","publicKey":null,"r":"0x1","raw":"0xe9018609184e72a0008276c0940000000000000000000000000000000000000005849184e72a80800101","s":"0x1","standardV":"0x4","to":"0x0000000000000000000000000000000000000005","transactionIndex":null,"v":"0x0","value":"0x9184e72a"},"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":"0x0","chainId":null,"condition":null,"creates":null,"from":"0x0000000000000000000000000000000000000002","gas":"0x76c0","gasPrice":"0x9184e72a000","hash":"0xa2e0da8a8064e0b9f93e95a53c2db6d01280efb8ac72a708d25487e67dd0f8fc","input":"0x","nonce":"0x1","publicKey":null,"r":"0x1","raw":"0xe9018609184e72a0008276c0940000000000000000000000000000000000000005849184e72a80800101","s":"0x1","standardV":"0x4","to":"0x0000000000000000000000000000000000000005","transactionIndex":null,"v":"0x0","value":"0x9184e72a"},"id":1}"#; miner.pending_transactions.lock().insert(hash, signed); assert_eq!(io.handle_request_sync(&request), Some(response.to_owned())); diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index af84d336a..95095cb72 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -456,7 +456,7 @@ fn should_confirm_sign_transaction_with_rlp() { let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + r#""raw":"0x"# + &rlp.to_hex() + r#"","# + r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + + r#""blockHash":null,"blockNumber":"0x0","# + &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + r#""condition":null,"creates":null,"# + &format!("\"from\":\"0x{:?}\",", &address) + diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index fd08f5ee9..a767bfb77 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ -299,7 +299,7 @@ fn should_add_sign_transaction_to_the_queue() { let response = r#"{"jsonrpc":"2.0","result":{"#.to_owned() + r#""raw":"0x"# + &rlp.to_hex() + r#"","# + r#""tx":{"# + - r#""blockHash":null,"blockNumber":null,"# + + r#""blockHash":null,"blockNumber":"0x0","# + &format!("\"chainId\":{},", t.chain_id().map_or("null".to_owned(), |n| format!("{}", n))) + r#""condition":null,"creates":null,"# + 
&format!("\"from\":\"0x{:?}\",", &address) + diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 83543f10b..6c052bf31 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -117,27 +117,27 @@ build_rpc_trait! { /// Get transaction by its hash. #[rpc(name = "eth_getTransactionByHash")] - fn transaction_by_hash(&self, H256) -> Result, Error>; + fn transaction_by_hash(&self, H256) -> BoxFuture, Error>; /// Returns transaction at given block hash and index. #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] - fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; + fn transaction_by_block_hash_and_index(&self, H256, Index) -> BoxFuture, Error>; /// Returns transaction by given block number and index. #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] - fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; + fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> BoxFuture, Error>; - /// Returns transaction receipt. + /// Returns transaction receipt by transaction hash. #[rpc(name = "eth_getTransactionReceipt")] - fn transaction_receipt(&self, H256) -> Result, Error>; + fn transaction_receipt(&self, H256) -> BoxFuture, Error>; /// Returns an uncles at given block and index. #[rpc(name = "eth_getUncleByBlockHashAndIndex")] - fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; + fn uncle_by_block_hash_and_index(&self, H256, Index) -> BoxFuture, Error>; /// Returns an uncles at given block and index. #[rpc(name = "eth_getUncleByBlockNumberAndIndex")] - fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; + fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> BoxFuture, Error>; /// Returns available compilers. 
/// @deprecated diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 90d512c86..570c21120 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -213,7 +213,7 @@ impl Transaction { hash: t.hash().into(), nonce: t.nonce.into(), block_hash: None, - block_number: None, + block_number: Some(block_number.into()), transaction_index: None, from: t.sender().into(), to: match t.action { From ca6d5660c12f520d7571fe073c039d959e2d485f Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Sun, 8 Oct 2017 17:28:31 +0000 Subject: [PATCH 22/22] [ci skip] js-precompiled 20171008-172308 --- Cargo.lock | 2 +- js/package-lock.json | 2 +- js/package.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index beab2a66e..7ec655522 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#c1cd82b377a7e21ddec99a19595685a36afbb352" +source = "git+https://github.com/paritytech/js-precompiled.git#aaa1f3610aa39ce4b5ebb23debce39e5368edbbd" dependencies = [ "parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package-lock.json b/js/package-lock.json index 6fbeca609..0f26c26a9 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.27", + "version": "1.8.28", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index 4867e8cad..472bb6155 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.8.27", + "version": "1.8.28", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ",