From 36556016933acce95c63624535461a7ebefd0f5f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 17:40:03 +0100 Subject: [PATCH 01/91] return errors on database corruption --- ethcore/src/block.rs | 3 +- ethcore/src/client/client.rs | 60 +++++---- ethcore/src/ethereum/ethash.rs | 10 +- ethcore/src/evm/evm.rs | 15 ++- ethcore/src/evm/ext.rs | 19 +-- ethcore/src/evm/interpreter/gasometer.rs | 12 +- ethcore/src/evm/interpreter/mod.rs | 20 +-- ethcore/src/executive.rs | 40 +++--- ethcore/src/externalities.rs | 65 ++++++--- ethcore/src/miner/miner.rs | 32 +++-- ethcore/src/miner/mod.rs | 10 +- ethcore/src/state/account.rs | 33 ++--- ethcore/src/state/mod.rs | 164 ++++++++++++----------- ethcore/src/types/executed.rs | 23 +++- ethcore/src/types/trace_types/error.rs | 14 +- 15 files changed, 290 insertions(+), 230 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index f2eff0d04..3626fdd3a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -540,7 +540,8 @@ pub fn enact( { if ::log::max_log_level() >= ::log::LogLevel::Trace { let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(), factories.clone())?; - trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", header.number(), s.root(), header.author(), s.balance(&header.author())); + trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", + header.number(), s.root(), header.author(), s.balance(&header.author())?); } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 96b25b351..7f209bad1 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -890,17 +890,20 @@ impl BlockChainClient for Client { let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let sender = t.sender(); - let balance = state.balance(&sender); + let balance = state.balance(&sender).map_err(|_| CallError::StateCorrupt)?; let 
needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(|_| CallError::StateCorrupt)?; } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?; // TODO gav move this into Executive. - ret.state_diff = original_state.map(|original| state.diff_from(original)); + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } Ok(ret) } @@ -921,7 +924,7 @@ impl BlockChainClient for Client { // that's just a copy of the state. let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let sender = t.sender(); - let balance = original_state.balance(&sender); + let balance = original_state.balance(&sender).map_err(ExecutionError::from)?; let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false }; let cond = |gas| { @@ -933,27 +936,29 @@ impl BlockChainClient for Client { let needed_balance = tx.value + tx.gas * tx.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(ExecutionError::from)?; } - Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) + Ok(Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) .transact(&tx, options.clone()) .map(|r| r.exception.is_none()) - .unwrap_or(false) + .unwrap_or(false)) }; let mut upper = header.gas_limit(); - if !cond(upper) { + if !cond(upper)? 
{ // impossible at block gas limit - try `UPPER_CEILING` instead. // TODO: consider raising limit by powers of two. upper = UPPER_CEILING.into(); - if !cond(upper) { + if !cond(upper)? { trace!(target: "estimate_gas", "estimate_gas failed with {}", upper); - return Err(CallError::Execution(ExecutionError::Internal)) + let err = ExecutionError::Internal(format!("Requires higher than upper limit of {}", upper)); + return Err(err.into()) } } let lower = t.gas_required(&self.engine.schedule(&env_info)).into(); - if cond(lower) { + if cond(lower)? { trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower); return Ok(lower) } @@ -961,23 +966,25 @@ impl BlockChainClient for Client { /// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`. /// Returns the lowest value between `lower` and `upper` for which `cond` returns true. /// We assert: `cond(lower) = false`, `cond(upper) = true` - fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> U256 where F: FnMut(U256) -> bool { + fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> Result + where F: FnMut(U256) -> Result + { while upper - lower > 1.into() { let mid = (lower + upper) / 2.into(); trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); - let c = cond(mid); + let c = cond(mid)?; match c { true => upper = mid, false => lower = mid, }; trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); } - upper + Ok(upper) } // binary chop to non-excepting call with gas somewhere between 21000 and block gas limit trace!(target: "estimate_gas", "estimate_gas chopping {} .. 
{}", lower, upper); - Ok(binary_chop(lower, upper, cond)) + binary_chop(lower, upper, cond) } fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { @@ -1006,17 +1013,16 @@ impl BlockChainClient for Client { let rest = txs.split_off(address.index); for t in txs { let t = SignedTransaction::new(t).expect(PROOF); - match Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, Default::default()) { - Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; } - Err(ee) => { return Err(CallError::Execution(ee)) } - } + let x = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, Default::default())?; + env_info.gas_used = env_info.gas_used + x.gas_used; } let first = rest.into_iter().next().expect("We split off < `address.index`; Length is checked earlier; qed"); let t = SignedTransaction::new(first).expect(PROOF); let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, options)?; - ret.state_diff = original_state.map(|original| state.diff_from(original)); - + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?) 
+ } Ok(ret) } @@ -1108,11 +1114,11 @@ impl BlockChainClient for Client { } fn nonce(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).map(|s| s.nonce(address)) + self.state_at(id).and_then(|s| s.nonce(address).ok()) } fn storage_root(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).and_then(|s| s.storage_root(address)) + self.state_at(id).and_then(|s| s.storage_root(address).ok()).and_then(|x| x) } fn block_hash(&self, id: BlockId) -> Option { @@ -1121,15 +1127,15 @@ impl BlockChainClient for Client { } fn code(&self, address: &Address, id: BlockId) -> Option> { - self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone())) + self.state_at(id).and_then(|s| s.code(address).ok()).map(|c| c.map(|c| (&*c).clone())) } fn balance(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).map(|s| s.balance(address)) + self.state_at(id).and_then(|s| s.balance(address).ok()) } fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option { - self.state_at(id).map(|s| s.storage_at(address, position)) + self.state_at(id).and_then(|s| s.storage_at(address, position).ok()) } fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option> { @@ -1182,7 +1188,7 @@ impl BlockChainClient for Client { }; let root = match state.storage_root(account) { - Some(root) => root, + Ok(Some(root)) => root, _ => return None, }; diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 6c7b91d39..3c9196a85 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -215,8 +215,14 @@ impl Engine for Ethash { // if block.fields().header.gas_limit() <= 4_000_000.into() { let mut state = block.fields_mut().state; for child in &self.ethash_params.dao_hardfork_accounts { - let b = state.balance(child); - state.transfer_balance(child, &self.ethash_params.dao_hardfork_beneficiary, &b, CleanupMode::NoEmpty); + let beneficiary = 
&self.ethash_params.dao_hardfork_beneficiary; + let res = state.balance(child) + .and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty)); + + if let Err(e) = res { + warn!("Unable to apply DAO hardfork due to database corruption."); + warn!("Your node is now likely out of consensus."); + } } // } } diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs index 420ebb6a0..09a93f087 100644 --- a/ethcore/src/evm/evm.rs +++ b/ethcore/src/evm/evm.rs @@ -17,12 +17,12 @@ //! Evm interface. use std::{ops, cmp, fmt}; -use util::{U128, U256, U512, Uint}; +use util::{U128, U256, U512, Uint, trie}; use action_params::ActionParams; use evm::Ext; /// Evm errors. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum Error { /// `OutOfGas` is returned when transaction execution runs out of gas. /// The state should be reverted to the state from before the @@ -61,8 +61,13 @@ pub enum Error { }, /// Returned on evm internal error. Should never be ignored during development. /// Likely to cause consensus issues. - #[allow(dead_code)] // created only by jit - Internal, + Internal(String), +} + +impl From> for Error { + fn from(err: Box) -> Self { + Error::Internal(format!("Internal error: {}", err)) + } } impl fmt::Display for Error { @@ -74,7 +79,7 @@ impl fmt::Display for Error { BadInstruction { .. } => "Bad instruction", StackUnderflow { .. } => "Stack underflow", OutOfStack { .. } => "Out of stack", - Internal => "Internal error", + Internal(ref msg) => msg, }; message.fmt(f) } diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index e2578cc68..352ffb7d9 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -42,24 +42,25 @@ pub enum MessageCallResult { } /// Externalities interface for EVMs +// TODO: [rob] associated error type instead of `trie::Result`. Not all EVMs are trie powered. pub trait Ext { /// Returns a value for given key. 
- fn storage_at(&self, key: &H256) -> H256; + fn storage_at(&self, key: &H256) -> trie::Result; /// Stores a value for given key. - fn set_storage(&mut self, key: H256, value: H256); + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()>; /// Determine whether an account exists. - fn exists(&self, address: &Address) -> bool; + fn exists(&self, address: &Address) -> trie::Result; /// Determine whether an account exists and is not null (zero balance/nonce, no code). - fn exists_and_not_null(&self, address: &Address) -> bool; + fn exists_and_not_null(&self, address: &Address) -> trie::Result; /// Balance of the origin account. - fn origin_balance(&self) -> U256; + fn origin_balance(&self) -> trie::Result; /// Returns address balance. - fn balance(&self, address: &Address) -> U256; + fn balance(&self, address: &Address) -> trie::Result; /// Returns the hash of one of the 256 most recent complete blocks. fn blockhash(&self, number: &U256) -> H256; @@ -87,10 +88,10 @@ pub trait Ext { ) -> MessageCallResult; /// Returns code at given address - fn extcode(&self, address: &Address) -> Arc; + fn extcode(&self, address: &Address) -> trie::Result>; /// Returns code size at given address - fn extcodesize(&self, address: &Address) -> usize; + fn extcodesize(&self, address: &Address) -> trie::Result; /// Creates log entry with given topics and data fn log(&mut self, topics: Vec, data: &[u8]); @@ -101,7 +102,7 @@ pub trait Ext { /// Should be called when contract commits suicide. /// Address to which funds should be refunded. - fn suicide(&mut self, refund_address: &Address); + fn suicide(&mut self, refund_address: &Address) -> trie::Result<()> ; /// Returns schedule. 
fn schedule(&self) -> &Schedule; diff --git a/ethcore/src/evm/interpreter/gasometer.rs b/ethcore/src/evm/interpreter/gasometer.rs index 5c96c3c05..9086200fa 100644 --- a/ethcore/src/evm/interpreter/gasometer.rs +++ b/ethcore/src/evm/interpreter/gasometer.rs @@ -123,7 +123,7 @@ impl Gasometer { instructions::SSTORE => { let address = H256::from(stack.peek(0)); let newval = stack.peek(1); - let val = U256::from(&*ext.storage_at(&address)); + let val = U256::from(&*ext.storage_at(&address)?); let gas = if val.is_zero() && !newval.is_zero() { schedule.sstore_set_gas @@ -146,12 +146,12 @@ impl Gasometer { instructions::SUICIDE => { let mut gas = Gas::from(schedule.suicide_gas); - let is_value_transfer = !ext.origin_balance().is_zero(); + let is_value_transfer = !ext.origin_balance()?.is_zero(); let address = u256_to_address(stack.peek(0)); if ( - !schedule.no_empty && !ext.exists(&address) + !schedule.no_empty && !ext.exists(&address)? ) || ( - schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address) + schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)? ) { gas = overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into())); } @@ -198,9 +198,9 @@ impl Gasometer { let is_value_transfer = !stack.peek(2).is_zero(); if instruction == instructions::CALL && ( - (!schedule.no_empty && !ext.exists(&address)) + (!schedule.no_empty && !ext.exists(&address)?) || - (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)) + (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)?) 
) { gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into())); } diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index bc3caa084..79304793e 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -273,7 +273,7 @@ impl Interpreter { let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); let contract_code = self.mem.read_slice(init_off, init_size); - let can_create = ext.balance(¶ms.address) >= endowment && ext.depth() < ext.schedule().max_depth; + let can_create = ext.balance(¶ms.address)? >= endowment && ext.depth() < ext.schedule().max_depth; if !can_create { stack.push(U256::zero()); @@ -319,11 +319,11 @@ impl Interpreter { // Get sender & receive addresses, check if we have balance let (sender_address, receive_address, has_balance, call_type) = match instruction { instructions::CALL => { - let has_balance = ext.balance(¶ms.address) >= value.expect("value set for all but delegate call; qed"); + let has_balance = ext.balance(¶ms.address)? >= value.expect("value set for all but delegate call; qed"); (¶ms.address, &code_address, has_balance, CallType::Call) }, instructions::CALLCODE => { - let has_balance = ext.balance(¶ms.address) >= value.expect("value set for all but delegate call; qed"); + let has_balance = ext.balance(¶ms.address)? 
>= value.expect("value set for all but delegate call; qed"); (¶ms.address, ¶ms.address, has_balance, CallType::CallCode) }, instructions::DELEGATECALL => (¶ms.sender, ¶ms.address, true, CallType::DelegateCall), @@ -366,7 +366,7 @@ impl Interpreter { }, instructions::SUICIDE => { let address = stack.pop_back(); - ext.suicide(&u256_to_address(&address)); + ext.suicide(&u256_to_address(&address))?; return Ok(InstructionResult::StopExecution); }, instructions::LOG0...instructions::LOG4 => { @@ -410,19 +410,19 @@ impl Interpreter { }, instructions::SLOAD => { let key = H256::from(&stack.pop_back()); - let word = U256::from(&*ext.storage_at(&key)); + let word = U256::from(&*ext.storage_at(&key)?); stack.push(word); }, instructions::SSTORE => { let address = H256::from(&stack.pop_back()); let val = stack.pop_back(); - let current_val = U256::from(&*ext.storage_at(&address)); + let current_val = U256::from(&*ext.storage_at(&address)?); // Increase refund for clear if !self.is_zero(¤t_val) && self.is_zero(&val) { ext.inc_sstore_clears(); } - ext.set_storage(address, H256::from(&val)); + ext.set_storage(address, H256::from(&val))?; }, instructions::PC => { stack.push(U256::from(code.position - 1)); @@ -438,7 +438,7 @@ impl Interpreter { }, instructions::BALANCE => { let address = u256_to_address(&stack.pop_back()); - let balance = ext.balance(&address); + let balance = ext.balance(&address)?; stack.push(balance); }, instructions::CALLER => { @@ -474,7 +474,7 @@ impl Interpreter { }, instructions::EXTCODESIZE => { let address = u256_to_address(&stack.pop_back()); - let len = ext.extcodesize(&address); + let len = ext.extcodesize(&address)?; stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { @@ -485,7 +485,7 @@ impl Interpreter { }, instructions::EXTCODECOPY => { let address = u256_to_address(&stack.pop_back()); - let code = ext.extcode(&address); + let code = ext.extcode(&address)?; self.copy_data_to_memory(stack, &code); }, instructions::GASPRICE => { diff --git 
a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 37b53202a..d287857a0 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -123,7 +123,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { mut vm_tracer: V ) -> Result where T: Tracer, V: VMTracer { let sender = t.sender(); - let nonce = self.state.nonce(&sender); + let nonce = self.state.nonce(&sender)?; let schedule = self.engine.schedule(self.info); let base_gas_required = U256::from(t.gas_required(&schedule)); @@ -149,7 +149,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } // TODO: we might need bigints here, or at least check overflows. - let balance = self.state.balance(&sender); + let balance = self.state.balance(&sender)?; let gas_cost = t.gas.full_mul(t.gas_price); let total_cost = U512::from(t.value) + gas_cost; @@ -160,8 +160,8 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } // NOTE: there can be no invalid transactions from this point. - self.state.inc_nonce(&sender); - self.state.sub_balance(&sender, &U256::from(gas_cost)); + self.state.inc_nonce(&sender)?; + self.state.sub_balance(&sender, &U256::from(gas_cost))?; let mut substate = Substate::new(); @@ -192,8 +192,8 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { gas: init_gas, gas_price: t.gas_price, value: ActionValue::Transfer(t.value), - code: self.state.code(address), - code_hash: self.state.code_hash(address), + code: self.state.code(address)?, + code_hash: self.state.code_hash(address)?, data: Some(t.data.clone()), call_type: CallType::Call, }; @@ -257,7 +257,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // at first, transfer value to destination if let ActionValue::Transfer(val) = params.value { - self.state.transfer_balance(¶ms.sender, ¶ms.address, &val, substate.to_cleanup_mode(&schedule)); + self.state.transfer_balance(¶ms.sender, ¶ms.address, &val, substate.to_cleanup_mode(&schedule))?; } trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); @@ -322,13 
+322,13 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let traces = subtracer.traces(); match res { - Ok(gas_left) => tracer.trace_call( + Ok(ref gas_left) => tracer.trace_call( trace_info, - gas - gas_left, + gas - *gas_left, trace_output, traces ), - Err(e) => tracer.trace_failed_call(trace_info, traces, e.into()), + Err(ref e) => tracer.trace_failed_call(trace_info, traces, e.into()), }; trace!(target: "executive", "substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate); @@ -365,9 +365,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // create contract and transfer value to it if necessary let schedule = self.engine.schedule(self.info); let nonce_offset = if schedule.no_empty {1} else {0}.into(); - let prev_bal = self.state.balance(¶ms.address); + let prev_bal = self.state.balance(¶ms.address)?; if let ActionValue::Transfer(val) = params.value { - self.state.sub_balance(¶ms.sender, &val); + self.state.sub_balance(¶ms.sender, &val)?; self.state.new_contract(¶ms.address, val + prev_bal, nonce_offset); } else { self.state.new_contract(¶ms.address, prev_bal, nonce_offset); @@ -388,14 +388,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { vm_tracer.done_subtrace(subvmtracer); match res { - Ok(gas_left) => tracer.trace_create( + Ok(ref gas_left) => tracer.trace_create( trace_info, - gas - gas_left, + gas - *gas_left, trace_output, created, subtracer.traces() ), - Err(e) => tracer.trace_failed_create(trace_info, subtracer.traces(), e.into()) + Err(ref e) => tracer.trace_failed_create(trace_info, subtracer.traces(), e.into()) }; self.enact_result(&res, substate, unconfirmed_substate); @@ -435,9 +435,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let sender = t.sender(); trace!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, sender); // Below: NoEmpty is safe since the sender must already be non-null to have sent this transaction - self.state.add_balance(&sender, &refund_value, CleanupMode::NoEmpty); 
+ self.state.add_balance(&sender, &refund_value, CleanupMode::NoEmpty)?; trace!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); - self.state.add_balance(&self.info.author, &fees_value, substate.to_cleanup_mode(&schedule)); + self.state.add_balance(&self.info.author, &fees_value, substate.to_cleanup_mode(&schedule))?; // perform suicides for address in &substate.suicides { @@ -446,13 +446,13 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // perform garbage-collection for address in &substate.garbage { - if self.state.exists(address) && !self.state.exists_and_not_null(address) { + if self.state.exists(address)? && !self.state.exists_and_not_null(address)? { self.state.kill_account(address); } } match result { - Err(evm::Error::Internal) => Err(ExecutionError::Internal), + Err(evm::Error::Internal(msg)) => Err(ExecutionError::Internal(msg)), Err(exception) => { Ok(Executed { exception: Some(exception), @@ -495,7 +495,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { | Err(evm::Error::OutOfStack {..}) => { self.state.revert_to_checkpoint(); }, - Ok(_) | Err(evm::Error::Internal) => { + Ok(_) | Err(evm::Error::Internal(_)) => { self.state.discard_checkpoint(); substate.accrue(un_substate); } diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 49ed2261e..db4a587d1 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -108,25 +108,25 @@ impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B> impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> where T: Tracer, V: VMTracer, B: StateBackend { - fn storage_at(&self, key: &H256) -> H256 { + fn storage_at(&self, key: &H256) -> trie::Result { self.state.storage_at(&self.origin_info.address, key) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.state.set_storage(&self.origin_info.address, key, value) } - fn 
exists(&self, address: &Address) -> bool { + fn exists(&self, address: &Address) -> trie::Result { self.state.exists(address) } - fn exists_and_not_null(&self, address: &Address) -> bool { + fn exists_and_not_null(&self, address: &Address) -> trie::Result { self.state.exists_and_not_null(address) } - fn origin_balance(&self) -> U256 { self.balance(&self.origin_info.address) } + fn origin_balance(&self) -> trie::Result { self.balance(&self.origin_info.address) } - fn balance(&self, address: &Address) -> U256 { + fn balance(&self, address: &Address) -> trie::Result { self.state.balance(address) } @@ -149,7 +149,13 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { // create new contract address - let address = contract_address(&self.origin_info.address, &self.state.nonce(&self.origin_info.address)); + let address = match self.state.nonce(&self.origin_info.address) { + Ok(nonce) => contract_address(&self.origin_info.address, &nonce), + Err(e) => { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return ContractCreateResult::Failed + } + }; // prepare the params let params = ActionParams { @@ -166,7 +172,10 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> call_type: CallType::None, }; - self.state.inc_nonce(&self.origin_info.address); + if let Err(e) = self.state.inc_nonce(&self.origin_info.address) { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return ContractCreateResult::Failed + } let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth); // TODO: handle internal error separately @@ -191,6 +200,14 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> ) -> MessageCallResult { trace!(target: "externalities", "call"); + let code_res = self.state.code(code_address) + .and_then(|code| self.state.code_hash(code_address).map(|hash| (code, 
hash))); + + let (code, code_hash) = match code_res { + Ok((code, hash)) => (code, hash), + Err(_) => return MessageCallResult::Failed, + }; + let mut params = ActionParams { sender: sender_address.clone(), address: receive_address.clone(), @@ -199,8 +216,8 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price, - code: self.state.code(code_address), - code_hash: self.state.code_hash(code_address), + code: code, + code_hash: code_hash, data: Some(data.to_vec()), call_type: call_type, }; @@ -217,12 +234,12 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> } } - fn extcode(&self, address: &Address) -> Arc { - self.state.code(address).unwrap_or_else(|| Arc::new(vec![])) + fn extcode(&self, address: &Address) -> trie::Result> { + Ok(self.state.code(address)?.unwrap_or_else(|| Arc::new(vec![]))) } - fn extcodesize(&self, address: &Address) -> usize { - self.state.code_size(address).unwrap_or(0) + fn extcodesize(&self, address: &Address) -> trie::Result { + Ok(self.state.code_size(address)?.unwrap_or(0)) } #[cfg_attr(feature="dev", allow(match_ref_pats))] @@ -257,10 +274,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> handle_copy(copy); - let mut code = vec![]; - code.extend_from_slice(data); - - self.state.init_code(&self.origin_info.address, code); + self.state.init_code(&self.origin_info.address, data.to_vec())?; Ok(*gas - return_cost) } } @@ -277,19 +291,26 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> }); } - fn suicide(&mut self, refund_address: &Address) { + fn suicide(&mut self, refund_address: &Address) -> trie::Result<()> { let address = self.origin_info.address.clone(); - let balance = self.balance(&address); + let balance = self.balance(&address)?; if &address == refund_address { // TODO [todr] To be consistent with CPP client we set balance to 0 in that case. 
- self.state.sub_balance(&address, &balance); + self.state.sub_balance(&address, &balance)?; } else { trace!(target: "ext", "Suiciding {} -> {} (xfer: {})", address, refund_address, balance); - self.state.transfer_balance(&address, refund_address, &balance, self.substate.to_cleanup_mode(&self.schedule)); + self.state.transfer_balance( + &address, + refund_address, + &balance, + self.substate.to_cleanup_mode(&self.schedule) + )?; } self.tracer.trace_suicide(address, balance, refund_address.clone()); self.substate.suicides.insert(address); + + Ok(()) } fn schedule(&self) -> &Schedule { diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 9cfd4a4a0..432c58025 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -711,7 +711,7 @@ impl MinerService for Miner { let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let sender = t.sender(); - let balance = state.balance(&sender); + let balance = state.balance(&sender).map_err(ExecutionError::from)?; let needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance @@ -721,7 +721,9 @@ impl MinerService for Miner { let mut ret = Executive::new(&mut state, &env_info, &*self.engine, client.vm_factory()).transact(t, options)?; // TODO gav move this into Executive. - ret.state_diff = original_state.map(|original| state.diff_from(original)); + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } Ok(ret) }, @@ -729,35 +731,37 @@ impl MinerService for Miner { } } - fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { + // TODO: The `chain.latest_x` actually aren't infallible, they just panic on corruption. + // TODO: return trie::Result here, or other. 
+ fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_balance(address), - |b| b.block().fields().state.balance(address) + || Some(chain.latest_balance(address)), + |b| b.block().fields().state.balance(address).ok(), ) } - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { + fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_storage_at(address, position), - |b| b.block().fields().state.storage_at(address, position) + || Some(chain.latest_storage_at(address, position)), + |b| b.block().fields().state.storage_at(address, position).ok(), ) } - fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { + fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_nonce(address), - |b| b.block().fields().state.nonce(address) + || Some(chain.latest_nonce(address)), + |b| b.block().fields().state.nonce(address).ok(), ) } - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { + fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option> { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_code(address), - |b| b.block().fields().state.code(address).map(|c| (*c).clone()) + || Some(chain.latest_code(address)), + |b| b.block().fields().state.code(address).ok().map(|c| c.map(|c| (&*c).clone())) ) } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index eee9d1c5c..e7dc52055 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -62,7 +62,7 @@ pub use self::work_notify::NotifyWork; pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOptions}; use 
std::collections::BTreeMap; -use util::{H256, U256, Address, Bytes}; +use util::{H256, U256, Address, Bytes, trie}; use client::{MiningBlockChainClient, Executed, CallAnalytics}; use block::ClosedBlock; use header::BlockNumber; @@ -181,19 +181,19 @@ pub trait MinerService : Send + Sync { fn sensible_gas_limit(&self) -> U256 { 21000.into() } /// Latest account balance in pending state. - fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256; + fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; /// Call into contract code using pending state. fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result; /// Get storage value in pending state. - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256; + fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option; /// Get account nonce in pending state. - fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256; + fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; /// Get contract code in pending state. - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; + fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option>; } /// Mining status diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 6e5297bc4..acd1591f7 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -169,22 +169,16 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. /// Takes modifed storage into account. 
- pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { + pub fn storage_at(&self, db: &HashDB, key: &H256) -> trie::Result { if let Some(value) = self.cached_storage_at(key) { - return value; + return Ok(value); } - let db = SecTrieDB::new(db, &self.storage_root) - .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ - SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ - using it will not fail."); + let db = SecTrieDB::new(db, &self.storage_root)?; - let item: U256 = match db.get_with(key, ::rlp::decode) { - Ok(x) => x.unwrap_or_else(U256::zero), - Err(e) => panic!("Encountered potential DB corruption: {}", e), - }; + let item: U256 = db.get_with(key, ::rlp::decode)?.unwrap_or_else(U256::zero); let value: H256 = item.into(); self.storage_cache.borrow_mut().insert(key.clone(), value.clone()); - value + Ok(value) } /// Get cached storage value if any. Returns `None` if the @@ -345,24 +339,19 @@ impl Account { } /// Commit the `storage_changes` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) { - let mut t = trie_factory.from_existing(db, &mut self.storage_root) - .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ - SecTrieDBMut would not set it to an invalid state root. 
Therefore the root is valid and DB creation \ - using it will not fail."); + pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> trie::Result<()> { + let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; for (k, v) in self.storage_changes.drain() { // cast key and value to trait type, // so we can call overloaded `to_bytes` method - let res = match v.is_zero() { - true => t.remove(&k), - false => t.insert(&k, &encode(&U256::from(&*v))), + match v.is_zero() { + true => t.remove(&k)?, + false => t.insert(&k, &encode(&U256::from(&*v)))?, }; - if let Err(e) = res { - warn!("Encountered potential DB corruption: {}", e); - } self.storage_cache.borrow_mut().insert(k, v); } + Ok(()) } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 1d26ea30f..b88a6a9f1 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -37,6 +37,7 @@ use state_db::StateDB; use util::*; +use util::trie; use util::trie::recorder::Recorder; mod account; @@ -362,37 +363,37 @@ impl State { } /// Determine whether an account exists. - pub fn exists(&self, a: &Address) -> bool { + pub fn exists(&self, a: &Address) -> trie::Result { // Bloom filter does not contain empty accounts, so it is important here to // check if account exists in the database directly before EIP-161 is in effect. self.ensure_cached(a, RequireCache::None, false, |a| a.is_some()) } /// Determine whether an account exists and if not empty. - pub fn exists_and_not_null(&self, a: &Address) -> bool { + pub fn exists_and_not_null(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null())) } /// Get the balance of account `a`. 
- pub fn balance(&self, a: &Address) -> U256 { + pub fn balance(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(U256::zero(), |account| *account.balance())) } /// Get the nonce of account `a`. - pub fn nonce(&self, a: &Address) -> U256 { + pub fn nonce(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce())) } /// Get the storage root of account `a`. - pub fn storage_root(&self, a: &Address) -> Option { + pub fn storage_root(&self, a: &Address) -> trie::Result> { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().and_then(|account| account.storage_root().cloned())) } /// Mutate storage of account `address` so that it is `value` for `key`. - pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { + pub fn storage_at(&self, address: &Address, key: &H256) -> trie::Result { // Storage key search and update works like this: // 1. If there's an entry for the account in the local cache check for the key and return it if found. // 2. If there's an entry for the account in the global cache check for the key or load it into that account. @@ -406,42 +407,46 @@ impl State { match maybe_acc.account { Some(ref account) => { if let Some(value) = account.cached_storage_at(key) { - return value; + return Ok(value); } else { local_account = Some(maybe_acc); } }, - _ => return H256::new(), + _ => return Ok(H256::new()), } } // check the global cache and and cache storage key there if found, - // otherwise cache the account localy and cache storage key there. 
- if let Some(result) = self.db.get_cached(address, |acc| acc.map_or(H256::new(), |a| { + let trie_res = self.db.get_cached(address, |acc| match acc { + None => Ok(H256::new()), + Some(a) => { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); a.storage_at(account_db.as_hashdb(), key) - })) { - return result; + } + }); + + match trie_res { + None => {} + Some(res) => return res, } + + // otherwise cache the account localy and cache storage key there. if let Some(ref mut acc) = local_account { if let Some(ref account) = acc.account { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address)); return account.storage_at(account_db.as_hashdb(), key) } else { - return H256::new() + return Ok(H256::new()) } } } // check if the account could exist before any requests to trie - if self.db.is_known_null(address) { return H256::zero() } + if self.db.is_known_null(address) { return Ok(H256::zero()) } // account is not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = match db.get_with(address, Account::from_rlp) { - Ok(acc) => acc, - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; - let r = maybe_acc.as_ref().map_or(H256::new(), |a| { + let maybe_acc = db.get_with(address, Account::from_rlp)?; + let r = maybe_acc.as_ref().map_or(Ok(H256::new()), |a| { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); a.storage_at(account_db.as_hashdb(), key) }); @@ -450,75 +455,84 @@ impl State { } /// Get accounts' code. - pub fn code(&self, a: &Address) -> Option> { + pub fn code(&self, a: &Address) -> trie::Result>> { self.ensure_cached(a, RequireCache::Code, true, |a| a.as_ref().map_or(None, |a| a.code().clone())) } /// Get an account's code hash. 
- pub fn code_hash(&self, a: &Address) -> H256 { + pub fn code_hash(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(SHA3_EMPTY, |a| a.code_hash())) } /// Get accounts' code size. - pub fn code_size(&self, a: &Address) -> Option { + pub fn code_size(&self, a: &Address) -> trie::Result> { self.ensure_cached(a, RequireCache::CodeSize, true, |a| a.as_ref().and_then(|a| a.code_size())) } /// Add `incr` to the balance of account `a`. #[cfg_attr(feature="dev", allow(single_match))] - pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) { - trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)); + pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) -> trie::Result<()> { + trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)?); let is_value_transfer = !incr.is_zero(); - if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)) { - self.require(a, false).add_balance(incr); + if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)?) { + self.require(a, false)?.add_balance(incr); } else { match cleanup_mode { - CleanupMode::KillEmpty(set) => if !is_value_transfer && self.exists(a) && !self.exists_and_not_null(a) { + CleanupMode::KillEmpty(set) => if !is_value_transfer && self.exists(a)? && !self.exists_and_not_null(a)? { set.insert(a.clone()); }, _ => {} } } + + Ok(()) } /// Subtract `decr` from the balance of account `a`. - pub fn sub_balance(&mut self, a: &Address, decr: &U256) { - trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)); - if !decr.is_zero() || !self.exists(a) { - self.require(a, false).sub_balance(decr); + pub fn sub_balance(&mut self, a: &Address, decr: &U256) -> trie::Result<()> { + trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)?); + if !decr.is_zero() || !self.exists(a)? 
{ + self.require(a, false)?.sub_balance(decr); } + + Ok(()) } /// Subtracts `by` from the balance of `from` and adds it to that of `to`. - pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, cleanup_mode: CleanupMode) { - self.sub_balance(from, by); - self.add_balance(to, by, cleanup_mode); + pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, cleanup_mode: CleanupMode) -> trie::Result<()> { + self.sub_balance(from, by)?; + self.add_balance(to, by, cleanup_mode)?; + Ok(()) } /// Increment the nonce of account `a` by 1. - pub fn inc_nonce(&mut self, a: &Address) { - self.require(a, false).inc_nonce() + pub fn inc_nonce(&mut self, a: &Address) -> trie::Result<()> { + self.require(a, false).map(|mut x| x.inc_nonce()) } /// Mutate storage of account `a` so that it is `value` for `key`. - pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) { - if self.storage_at(a, &key) != value { - self.require(a, false).set_storage(key, value) + pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> trie::Result<()> { + if self.storage_at(a, &key)? != value { + self.require(a, false)?.set_storage(key, value) } + + Ok(()) } /// Initialise the code of account `a` so that it is `code`. /// NOTE: Account should have been created with `new_contract`. - pub fn init_code(&mut self, a: &Address, code: Bytes) { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).init_code(code); + pub fn init_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.init_code(code); + Ok(()) } /// Reset the code of account `a` so that it is `code`. 
- pub fn reset_code(&mut self, a: &Address, code: Bytes) { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).reset_code(code); + pub fn reset_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.reset_code(code); + Ok(()) } /// Execute a given transaction. @@ -629,25 +643,29 @@ impl State { })) } - fn query_pod(&mut self, query: &PodState) { - for (address, pod_account) in query.get().into_iter() - .filter(|&(a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some())) - { + fn query_pod(&mut self, query: &PodState) -> trie::Result<()> { + for (address, pod_account) in query.get() { + if !self.ensure_cached(address, RequireCache::Code, true, |a| a.is_some())? { + continue + } + // needs to be split into two parts for the refcell code here // to work. for key in pod_account.storage.keys() { - self.storage_at(address, key); + self.storage_at(address, key)?; } } + + Ok(()) } /// Returns a `StateDiff` describing the difference from `orig` to `self`. /// Consumes self. - pub fn diff_from(&self, orig: State) -> StateDiff { + pub fn diff_from(&self, orig: State) -> trie::Result { let pod_state_post = self.to_pod(); let mut state_pre = orig; - state_pre.query_pod(&pod_state_post); - pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post) + state_pre.query_pod(&pod_state_post)?; + Ok(pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post)) } // load required account data from the databases. @@ -681,16 +699,16 @@ impl State { /// Check caches for required data /// First searches for account in the local, then the shared cache. /// Populates local cache if nothing found. 
- fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> U + fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> trie::Result where F: Fn(Option<&Account>) -> U { // check local cache first if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { if let Some(ref mut account) = maybe_acc.account { let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); Self::update_account_cache(require, account, &self.db, accountdb.as_hashdb()); - return f(Some(account)); + return Ok(f(Some(account))); } - return f(None); + return Ok(f(None)); } // check global cache let result = self.db.get_cached(a, |mut acc| { @@ -701,37 +719,34 @@ impl State { f(acc.map(|a| &*a)) }); match result { - Some(r) => r, + Some(r) => Ok(r), None => { // first check if it is not in database for sure - if check_null && self.db.is_known_null(a) { return f(None); } + if check_null && self.db.is_known_null(a) { return Ok(f(None)); } // not found in the global cache, get from the DB and insert into local - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let mut maybe_acc = match db.get_with(a, Account::from_rlp) { - Ok(acc) => acc, - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; + let mut maybe_acc = db.get_with(a, Account::from_rlp)?; if let Some(ref mut account) = maybe_acc.as_mut() { let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); Self::update_account_cache(require, account, &self.db, accountdb.as_hashdb()); } let r = f(maybe_acc.as_ref()); self.insert_cache(a, AccountEntry::new_clean(maybe_acc)); - r + Ok(r) } } } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. 
- fn require<'a>(&'a self, a: &Address, require_code: bool) -> RefMut<'a, Account> { + fn require<'a>(&'a self, a: &Address, require_code: bool) -> trie::Result> { self.require_or_from(a, require_code, || Account::new_basic(U256::from(0u8), self.account_start_nonce), |_|{}) } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. /// If it doesn't exist, make account equal the evaluation of `default`. - fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) - -> RefMut<'a, Account> + fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> trie::Result> + where F: FnOnce() -> Account, G: FnOnce(&mut Account), { let contains_key = self.cache.borrow().contains_key(a); if !contains_key { @@ -739,11 +754,8 @@ impl State { Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)), None => { let maybe_acc = if !self.db.is_known_null(a) { - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - match db.get_with(a, Account::from_rlp) { - Ok(acc) => AccountEntry::new_clean(acc), - Err(e) => panic!("Potential DB corruption encountered: {}", e), - } + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; + AccountEntry::new_clean(db.get_with(a, Account::from_rlp)?) } else { AccountEntry::new_clean(None) }; @@ -754,7 +766,7 @@ impl State { self.note_cache(a); // at this point the entry is guaranteed to be in the cache. - RefMut::map(self.cache.borrow_mut(), |c| { + Ok(RefMut::map(self.cache.borrow_mut(), |c| { let mut entry = c.get_mut(a).expect("entry known to exist in the cache; qed"); match &mut entry.account { @@ -775,18 +787,18 @@ impl State { }, _ => panic!("Required account must always exist; qed"), } - }) + })) } } -// LES state proof implementations. 
+// State proof implementations; useful for light client protocols. impl State { /// Prove an account's existence or nonexistence in the state trie. /// Returns a merkle proof of the account's trie node with all nodes before `from_level` /// omitted or an encountered trie error. /// Requires a secure trie to be used for accurate results. /// `account_key` == sha3(address) - pub fn prove_account(&self, account_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_account(&self, account_key: H256, from_level: u32) -> trie::Result> { let mut recorder = Recorder::with_depth(from_level); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; trie.get_with(&account_key, &mut recorder)?; @@ -799,7 +811,7 @@ impl State { /// `from_level` omitted. Requires a secure trie to be used for correctness. /// `account_key` == sha3(address) /// `storage_key` == sha3(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> trie::Result> { // TODO: probably could look into cache somehow but it's keyed by // address, not sha3(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; @@ -814,7 +826,7 @@ impl State { /// Get code by address hash. /// Only works when backed by a secure trie. - pub fn code_by_address_hash(&self, account_key: H256) -> Result, Box> { + pub fn code_by_address_hash(&self, account_key: H256) -> trie::Result> { let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let mut acc = match trie.get_with(&account_key, Account::from_rlp)? { Some(acc) => acc, diff --git a/ethcore/src/types/executed.rs b/ethcore/src/types/executed.rs index 21858c194..4301044ce 100644 --- a/ethcore/src/types/executed.rs +++ b/ethcore/src/types/executed.rs @@ -16,7 +16,7 @@ //! Transaction execution format module. 
-use util::{Bytes, U256, Address, U512}; +use util::{Bytes, U256, Address, U512, trie}; use rlp::*; use evm; use trace::{VMTrace, FlatTrace}; @@ -146,27 +146,33 @@ pub enum ExecutionError { got: U512 }, /// Returned when internal evm error occurs. - Internal, + Internal(String), /// Returned when generic transaction occurs TransactionMalformed(String), } +impl From> for ExecutionError { + fn from(err: Box) -> Self { + ExecutionError::Internal(format!("{}", err)) + } +} + impl fmt::Display for ExecutionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ExecutionError::*; let msg = match *self { - NotEnoughBaseGas { required, got } => + NotEnoughBaseGas { ref required, ref got } => format!("Not enough base gas. {} is required, but only {} paid", required, got), - BlockGasLimitReached { gas_limit, gas_used, gas } => + BlockGasLimitReached { ref gas_limit, ref gas_used, ref gas } => format!("Block gas limit reached. The limit is {}, {} has \ already been used, and {} more is required", gas_limit, gas_used, gas), - InvalidNonce { expected, got } => + InvalidNonce { ref expected, ref got } => format!("Invalid transaction nonce: expected {}, found {}", expected, got), - NotEnoughCash { required, got } => + NotEnoughCash { ref required, ref got } => format!("Cost of transaction exceeds sender balance. {} is required \ but the sender only has {}", required, got), - Internal => "Internal evm error".into(), + Internal(ref msg) => msg.clone(), TransactionMalformed(ref err) => format!("Malformed transaction: {}", err), }; @@ -184,6 +190,8 @@ pub enum CallError { StatePruned, /// Couldn't find an amount of gas that didn't result in an exception. Exceptional, + /// Corrupt state. + StateCorrupt, /// Error executing. 
Execution(ExecutionError), } @@ -202,6 +210,7 @@ impl fmt::Display for CallError { TransactionNotFound => "Transaction couldn't be found in the chain".into(), StatePruned => "Couldn't find the transaction block's state in the chain".into(), Exceptional => "An exception happened in the execution".into(), + StateCorrupt => "Stored state found to be corrupted.".into(), Execution(ref e) => format!("{}", e), }; diff --git a/ethcore/src/types/trace_types/error.rs b/ethcore/src/types/trace_types/error.rs index 7eb16570c..ea3d32679 100644 --- a/ethcore/src/types/trace_types/error.rs +++ b/ethcore/src/types/trace_types/error.rs @@ -40,19 +40,25 @@ pub enum Error { Internal, } -impl From for Error { - fn from(e: EvmError) -> Self { - match e { +impl<'a> From<&'a EvmError> for Error { + fn from(e: &'a EvmError) -> Self { + match *e { EvmError::OutOfGas => Error::OutOfGas, EvmError::BadJumpDestination { .. } => Error::BadJumpDestination, EvmError::BadInstruction { .. } => Error::BadInstruction, EvmError::StackUnderflow { .. } => Error::StackUnderflow, EvmError::OutOfStack { .. 
} => Error::OutOfStack, - EvmError::Internal => Error::Internal, + EvmError::Internal(_) => Error::Internal, } } } +impl From for Error { + fn from(e: EvmError) -> Self { + Error::from(&e) + } +} + impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; From 91753c53cd5a72de764b5354eeff4d1c59980d40 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 18:41:01 +0100 Subject: [PATCH 02/91] fix tests, json tests --- ethcore/src/engines/authority_round.rs | 8 +- ethcore/src/engines/tendermint/mod.rs | 8 +- ethcore/src/ethereum/ethash.rs | 30 +++- ethcore/src/ethereum/mod.rs | 12 +- ethcore/src/evm/tests.rs | 31 ++-- ethcore/src/executive.rs | 50 +++--- ethcore/src/externalities.rs | 2 +- ethcore/src/json_tests/executive.rs | 60 ++++--- ethcore/src/miner/miner.rs | 3 +- ethcore/src/miner/mod.rs | 2 +- ethcore/src/spec/spec.rs | 2 +- ethcore/src/state/account.rs | 14 +- ethcore/src/state/mod.rs | 224 ++++++++++++------------- ethcore/src/tests/client.rs | 4 +- 14 files changed, 245 insertions(+), 205 deletions(-) diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 4f99a644e..e4efdaea1 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -250,10 +250,12 @@ impl Engine for AuthorityRound { fn on_close_block(&self, block: &mut ExecutedBlock) { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty); + let res = fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty) + .map_err(::error::Error::from) + .and_then(|_| fields.state.commit()); // Commit state so that we can actually figure out the state root. 
- if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); + if let Err(e) = res { + warn!("Encountered error on closing block: {}", e); } } diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 47117f83a..9b750a221 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -467,10 +467,12 @@ impl Engine for Tendermint { fn on_close_block(&self, block: &mut ExecutedBlock) { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty); + let res = fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty) + .map_err(::error::Error::from) + .and_then(|_| fields.state.commit()); // Commit state so that we can actually figure out the state root. - if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); + if let Err(e) = res { + warn!("Encountered error on closing block: {}", e); } } diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 3c9196a85..67c956cdc 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -213,13 +213,13 @@ impl Engine for Ethash { if block.fields().header.number() == self.ethash_params.dao_hardfork_transition { // TODO: enable trigger function maybe? 
// if block.fields().header.gas_limit() <= 4_000_000.into() { - let mut state = block.fields_mut().state; + let state = block.fields_mut().state; for child in &self.ethash_params.dao_hardfork_accounts { let beneficiary = &self.ethash_params.dao_hardfork_beneficiary; let res = state.balance(child) .and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty)); - if let Err(e) = res { + if let Err(_) = res { warn!("Unable to apply DAO hardfork due to database corruption."); warn!("Your node is now likely out of consensus."); } @@ -235,12 +235,28 @@ impl Engine for Ethash { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())), CleanupMode::NoEmpty); + let res = fields.state.add_balance( + fields.header.author(), + &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())), + CleanupMode::NoEmpty + ); + + if let Err(e) = res { + warn!("Failed to give block reward: {}", e); + } // Bestow uncle rewards let current_number = fields.header.number(); for u in fields.uncles.iter() { - fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)), CleanupMode::NoEmpty); + let res = fields.state.add_balance( + u.author(), + &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)), + CleanupMode::NoEmpty + ); + + if let Err(e) = res { + warn!("Failed to give uncle reward: {}", e); + } } // Commit state so that we can actually figure out the state root. 
@@ -473,7 +489,7 @@ mod tests { let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close(); - assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); + assert_eq!(b.state().balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); } #[test] @@ -491,8 +507,8 @@ mod tests { b.push_uncle(uncle).unwrap(); let b = b.close(); - assert_eq!(b.state().balance(&Address::zero()), "478eae0e571ba000".into()); - assert_eq!(b.state().balance(&uncle_author), "3cb71f51fc558000".into()); + assert_eq!(b.state().balance(&Address::zero()).unwrap(), "478eae0e571ba000".into()); + assert_eq!(b.state().balance(&uncle_author).unwrap(), "3cb71f51fc558000".into()); } #[test] diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index c8eb44911..b15c9e4de 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -91,12 +91,12 @@ mod tests { let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap(); let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); - assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000004".into()), 1u64.into()); - assert_eq!(s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()), U256::from(1u64) << 200); - assert_eq!(s.balance(&"0000000000000000000000000000000000000000".into()), 0u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()).unwrap(), 
1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000004".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()).unwrap(), U256::from(1u64) << 200); + assert_eq!(s.balance(&"0000000000000000000000000000000000000000".into()).unwrap(), 0u64.into()); } #[test] diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index ccf711a40..3002c170c 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -82,28 +82,29 @@ impl Default for Schedule { } impl Ext for FakeExt { - fn storage_at(&self, key: &H256) -> H256 { - self.store.get(key).unwrap_or(&H256::new()).clone() + fn storage_at(&self, key: &H256) -> trie::Result { + Ok(self.store.get(key).unwrap_or(&H256::new()).clone()) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.store.insert(key, value); + Ok(()) } - fn exists(&self, address: &Address) -> bool { - self.balances.contains_key(address) + fn exists(&self, address: &Address) -> trie::Result { + Ok(self.balances.contains_key(address)) } - fn exists_and_not_null(&self, address: &Address) -> bool { - self.balances.get(address).map_or(false, |b| !b.is_zero()) + fn exists_and_not_null(&self, address: &Address) -> trie::Result { + Ok(self.balances.get(address).map_or(false, |b| !b.is_zero())) } - fn origin_balance(&self) -> U256 { + fn origin_balance(&self) -> trie::Result { unimplemented!() } - fn balance(&self, address: &Address) -> U256 { - self.balances[address] + fn balance(&self, address: &Address) -> trie::Result { + Ok(self.balances[address]) } fn blockhash(&self, number: &U256) -> H256 { @@ -146,12 +147,12 @@ impl Ext for FakeExt { MessageCallResult::Success(*gas) } - fn extcode(&self, 
address: &Address) -> Arc { - self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone() + fn extcode(&self, address: &Address) -> trie::Result> { + Ok(self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone()) } - fn extcodesize(&self, address: &Address) -> usize { - self.codes.get(address).map_or(0, |c| c.len()) + fn extcodesize(&self, address: &Address) -> trie::Result { + Ok(self.codes.get(address).map_or(0, |c| c.len())) } fn log(&mut self, topics: Vec, data: &[u8]) { @@ -165,7 +166,7 @@ impl Ext for FakeExt { unimplemented!(); } - fn suicide(&mut self, _refund_address: &Address) { + fn suicide(&mut self, _refund_address: &Address) -> trie::Result<()> { unimplemented!(); } diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index d287857a0..d9f1b7413 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -544,7 +544,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(0x7)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -555,9 +555,9 @@ mod tests { }; assert_eq!(gas_left, U256::from(79_975)); - assert_eq!(state.storage_at(&address, &H256::new()), H256::from(&U256::from(0xf9u64))); - assert_eq!(state.balance(&sender), U256::from(0xf9)); - assert_eq!(state.balance(&address), U256::from(0x7)); + assert_eq!(state.storage_at(&address, &H256::new()).unwrap(), H256::from(&U256::from(0xf9u64))); + assert_eq!(state.balance(&sender).unwrap(), U256::from(0xf9)); + assert_eq!(state.balance(&address).unwrap(), U256::from(0x7)); // 0 cause contract hasn't returned assert_eq!(substate.contracts_created.len(), 0); @@ -603,7 +603,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result 
= get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -662,7 +662,7 @@ mod tests { params.call_type = CallType::Call; let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(5); let mut substate = Substate::new(); @@ -773,7 +773,7 @@ mod tests { params.value = ActionValue::Transfer(100.into()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(5); let mut substate = Substate::new(); @@ -861,7 +861,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -913,7 +913,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(1024); let mut substate = Substate::new(); @@ -971,9 
+971,9 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.init_code(&address_a, code_a.clone()); - state.init_code(&address_b, code_b.clone()); - state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty); + state.init_code(&address_a, code_a.clone()).unwrap(); + state.init_code(&address_b, code_b.clone()).unwrap(); + state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); @@ -985,7 +985,7 @@ mod tests { }; assert_eq!(gas_left, U256::from(73_237)); - assert_eq!(state.storage_at(&address_a, &H256::from(&U256::from(0x23))), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address_a, &H256::from(&U256::from(0x23))).unwrap(), H256::from(&U256::from(1))); } // test is incorrect, mk @@ -1019,7 +1019,7 @@ mod tests { params.code = Some(Arc::new(code.clone())); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.init_code(&address, code); + state.init_code(&address, code).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -1030,8 +1030,8 @@ mod tests { }; assert_eq!(gas_left, U256::from(59_870)); - assert_eq!(state.storage_at(&address, &H256::from(&U256::zero())), H256::from(&U256::from(1))); - assert_eq!(state.storage_at(&address, &H256::from(&U256::one())), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address, &H256::from(&U256::one())).unwrap(), H256::from(&U256::from(1))); } // test is incorrect, mk @@ -1052,7 +1052,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap(); let mut info = 
EnvInfo::default(); info.gas_limit = U256::from(100_000); let engine = TestEngine::new(0); @@ -1069,10 +1069,10 @@ mod tests { assert_eq!(executed.cumulative_gas_used, U256::from(41_301)); assert_eq!(executed.logs.len(), 0); assert_eq!(executed.contracts_created.len(), 0); - assert_eq!(state.balance(&sender), U256::from(1)); - assert_eq!(state.balance(&contract), U256::from(17)); - assert_eq!(state.nonce(&sender), U256::from(1)); - assert_eq!(state.storage_at(&contract, &H256::new()), H256::from(&U256::from(1))); + assert_eq!(state.balance(&sender).unwrap(), U256::from(1)); + assert_eq!(state.balance(&contract).unwrap(), U256::from(17)); + assert_eq!(state.nonce(&sender).unwrap(), U256::from(1)); + assert_eq!(state.storage_at(&contract, &H256::new()).unwrap(), H256::from(&U256::from(1))); } evm_test!{test_transact_invalid_nonce: test_transact_invalid_nonce_jit, test_transact_invalid_nonce_int} @@ -1090,7 +1090,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_limit = U256::from(100_000); let engine = TestEngine::new(0); @@ -1123,7 +1123,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_used = U256::from(20_000); info.gas_limit = U256::from(100_000); @@ -1158,7 +1158,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_limit = 
U256::from(100_000); let engine = TestEngine::new(0); @@ -1193,7 +1193,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index db4a587d1..893ba03be 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -506,7 +506,7 @@ mod tests { { let vm_factory = Default::default(); let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer); - ext.suicide(refund_account); + ext.suicide(refund_account).unwrap(); } assert_eq!(setup.sub_state.suicides.len(), 1); diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index f2e73ba97..844fa08f5 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -74,39 +74,39 @@ impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B> address: Address, tracer: &'a mut T, vm_tracer: &'a mut V, - ) -> Self { - TestExt { - contract_address: contract_address(&address, &state.nonce(&address)), + ) -> trie::Result { + Ok(TestExt { + contract_address: contract_address(&address, &state.nonce(&address)?), ext: Externalities::new(state, info, engine, vm_factory, depth, origin_info, substate, output, tracer, vm_tracer), callcreates: vec![] - } + }) } } impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B> where T: Tracer, V: VMTracer, B: StateBackend { - fn storage_at(&self, key: &H256) -> H256 { + fn storage_at(&self, 
key: &H256) -> trie::Result { self.ext.storage_at(key) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.ext.set_storage(key, value) } - fn exists(&self, address: &Address) -> bool { + fn exists(&self, address: &Address) -> trie::Result { self.ext.exists(address) } - fn exists_and_not_null(&self, address: &Address) -> bool { + fn exists_and_not_null(&self, address: &Address) -> trie::Result { self.ext.exists_and_not_null(address) } - fn balance(&self, address: &Address) -> U256 { + fn balance(&self, address: &Address) -> trie::Result { self.ext.balance(address) } - fn origin_balance(&self) -> U256 { + fn origin_balance(&self) -> trie::Result { self.ext.origin_balance() } @@ -143,11 +143,11 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B> MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Arc { + fn extcode(&self, address: &Address) -> trie::Result> { self.ext.extcode(address) } - fn extcodesize(&self, address: &Address) -> usize { + fn extcodesize(&self, address: &Address) -> trie::Result { self.ext.extcodesize(address) } @@ -159,7 +159,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B> self.ext.ret(gas, data) } - fn suicide(&mut self, refund_address: &Address) { + fn suicide(&mut self, refund_address: &Address) -> trie::Result<()> { self.ext.suicide(refund_address) } @@ -201,6 +201,19 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { fail = true }; + macro_rules! 
try_fail { + ($e: expr) => { + match $e { + Ok(x) => x, + Err(e) => { + let msg = format!("Internal error: {}", e); + fail_unless(false, &msg); + continue + } + } + } + } + let out_of_gas = vm.out_of_gas(); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -217,7 +230,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { // execute let (res, callcreates) = { - let mut ex = TestExt::new( + let mut ex = try_fail!(TestExt::new( &mut state, &info, &engine, @@ -229,7 +242,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { params.address.clone(), &mut tracer, &mut vm_tracer, - ); + )); let mut evm = vm_factory.create(params.gas); let res = evm.exec(params, &mut ex); // a return in finalize will not alter callcreates @@ -248,14 +261,19 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { for (address, account) in vm.post_state.unwrap().into_iter() { let address = address.into(); let code: Vec = account.code.into(); - fail_unless(state.code(&address).as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); - fail_unless(state.balance(&address) == account.balance.into(), "balance is incorrect"); - fail_unless(state.nonce(&address) == account.nonce.into(), "nonce is incorrect"); - account.storage.into_iter().foreach(|(k, v)| { + let found_code = try_fail!(state.code(&address)); + let found_balance = try_fail!(state.balance(&address)); + let found_nonce = try_fail!(state.nonce(&address)); + + fail_unless(found_code.as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); + fail_unless(found_balance == account.balance.into(), "balance is incorrect"); + fail_unless(found_nonce == account.nonce.into(), "nonce is incorrect"); + for (k, v) in account.storage { let key: U256 = k.into(); let value: U256 = v.into(); - fail_unless(state.storage_at(&address, &From::from(key)) == From::from(value), "storage is incorrect"); - }); + let 
found_storage = try_fail!(state.storage_at(&address, &From::from(key))); + fail_unless(found_storage == From::from(value), "storage is incorrect"); + } } let calls: Option> = vm.calls.map(|c| c.into_iter().map(From::from).collect()); diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 432c58025..99c22f88e 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -715,7 +715,8 @@ impl MinerService for Miner { let needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(ExecutionError::from)?; } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, client.vm_factory()).transact(t, options)?; diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index e7dc52055..74e1cb598 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -62,7 +62,7 @@ pub use self::work_notify::NotifyWork; pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOptions}; use std::collections::BTreeMap; -use util::{H256, U256, Address, Bytes, trie}; +use util::{H256, U256, Address, Bytes}; use client::{MiningBlockChainClient, Executed, CallAnalytics}; use block::ClosedBlock; use header::BlockNumber; diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 85996d24b..67e21208b 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -389,6 +389,6 @@ mod tests { let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap(); let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(), Default::default()).unwrap(); let expected = 
H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()), expected); + assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()).unwrap(), expected); } } diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index acd1591f7..ebdf36d89 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -483,7 +483,7 @@ mod tests { let rlp = { let mut a = Account::new_contract(69.into(), 0.into()); a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.init_code(vec![]); a.commit_code(&mut db); a.rlp() @@ -491,8 +491,8 @@ mod tests { let a = Account::from_rlp(&rlp); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); - assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); - assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))), H256::new()); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))).unwrap(), H256::from(&U256::from(0x1234u64))); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))).unwrap(), H256::new()); } #[test] @@ -521,7 +521,7 @@ mod tests { let mut db = AccountDBMut::new(&mut db, &Address::new()); a.set_storage(0.into(), 0x1234.into()); assert_eq!(a.storage_root(), None); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); } @@ -531,11 +531,11 @@ mod tests { let mut db = MemoryDB::new(); let mut db = 
AccountDBMut::new(&mut db, &Address::new()); a.set_storage(0.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.set_storage(1.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.set_storage(1.into(), 0.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index b88a6a9f1..e25d7d404 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -573,7 +573,7 @@ impl State { let addr_hash = account.address_hash(address); { let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash); - account.commit_storage(&factories.trie, account_db.as_hashdb_mut()); + account.commit_storage(&factories.trie, account_db.as_hashdb_mut())?; account.commit_code(account_db.as_hashdb_mut()); } if !account.is_empty() { @@ -911,7 +911,7 @@ mod tests { data: FromHex::from_hex("601080600c6000396000f3006000355415600957005b60203560003555").unwrap(), }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -941,13 +941,13 @@ mod tests { let temp = RandomTempPath::new(); let mut state = { let mut state = get_temp_state_in(temp.as_path()); - assert_eq!(state.exists(&a), false); - state.inc_nonce(&a); + assert_eq!(state.exists(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); state.clone() }; - state.inc_nonce(&a); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); } @@ -971,7 
+971,7 @@ mod tests { data: FromHex::from_hex("5b600056").unwrap(), }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1008,8 +1008,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1051,7 +1051,7 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1135,7 +1135,7 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1178,8 +1178,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()).unwrap(); + state.init_code(&0xb.into(), 
FromHex::from_hex("6000").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1240,8 +1240,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1299,8 +1299,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1339,9 +1339,9 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1399,8 +1399,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), 
FromHex::from_hex("60006000600060006045600b6000f1").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1454,8 +1454,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds. - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()).unwrap(); // not enough funds. + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1497,9 +1497,9 @@ mod tests { data: vec![],//600480600b6000396000f35b600056 }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1553,10 +1553,10 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), 
FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1628,10 +1628,10 @@ mod tests { data: vec![],//600480600b6000396000f35b600056 }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()).unwrap(); + state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1701,9 +1701,9 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()); - state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty); - state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), 
FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()).unwrap(); + state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty).unwrap(); + state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1740,16 +1740,16 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}); - state.init_code(&a, vec![1, 2, 3]); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}).unwrap(); + state.init_code(&a, vec![1, 2, 3]).unwrap(); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); state.commit().unwrap(); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); } #[test] @@ -1758,13 +1758,13 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))); + state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))).unwrap(); state.commit().unwrap(); state.drop() }; let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))), H256::from(&U256::from(69u64))); + assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))).unwrap(), 
H256::from(&U256::from(69u64))); } #[test] @@ -1773,16 +1773,16 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.inc_nonce(&a); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); + state.inc_nonce(&a).unwrap(); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); } #[test] @@ -1790,16 +1790,16 @@ mod tests { let a = Address::zero(); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.exists_and_not_null(&a), false); - state.inc_nonce(&a); - assert_eq!(state.exists(&a), true); - assert_eq!(state.exists_and_not_null(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.exists_and_not_null(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.kill_account(&a); - assert_eq!(state.exists(&a), false); - assert_eq!(state.exists_and_not_null(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1809,13 +1809,13 @@ mod tests { let db = get_temp_state_db_in(path.as_path()); let (root, db) 
= { let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty); // create an empty account + state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty).unwrap(); // create an empty account state.commit().unwrap(); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(!state.exists(&a)); - assert!(!state.exists_and_not_null(&a)); + assert!(!state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); } #[test] @@ -1825,13 +1825,13 @@ mod tests { let db = get_temp_state_db_in(path.as_path()); let (root, db) = { let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate); // create an empty account + state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account state.commit().unwrap(); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(state.exists(&a)); - assert!(!state.exists_and_not_null(&a)); + assert!(state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); } #[test] @@ -1840,27 +1840,27 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.inc_nonce(&a); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); - assert_eq!(state.exists(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.drop() }; let (root, db) = { let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.kill_account(&a); 
state.commit().unwrap(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1869,20 +1869,20 @@ mod tests { let mut state = state_result.reference_mut(); let a = Address::zero(); let b = 1u64.into(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(69u64)); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); - state.sub_balance(&a, &U256::from(42u64)); - assert_eq!(state.balance(&a), U256::from(27u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.sub_balance(&a, &U256::from(42u64)).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(27u64)); - state.transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(9u64)); - assert_eq!(state.balance(&b), U256::from(18u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); + state.transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(9u64)); - assert_eq!(state.balance(&b), U256::from(18u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + 
assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); } #[test] @@ -1890,16 +1890,16 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(1u64)); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(2u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); state.commit().unwrap(); - assert_eq!(state.nonce(&a), U256::from(2u64)); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(3u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); state.commit().unwrap(); - assert_eq!(state.nonce(&a), U256::from(3u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); } #[test] @@ -1907,11 +1907,11 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - assert_eq!(state.balance(&a), U256::from(0u64)); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(0u64)); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1919,7 +1919,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - state.require(&a, false); + state.require(&a, false).unwrap(); state.commit().unwrap(); assert_eq!(state.root().hex(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785"); } @@ -1930,15 +1930,15 @@ mod tests { let mut state = 
state_result.reference_mut(); let a = Address::zero(); state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(69u64)); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.discard_checkpoint(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.checkpoint(); - state.add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(70u64)); + state.add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(70u64)); state.revert_to_checkpoint(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); } #[test] @@ -1948,12 +1948,12 @@ mod tests { let a = Address::zero(); state.checkpoint(); state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(69u64)); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.discard_checkpoint(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.revert_to_checkpoint(); - assert_eq!(state.balance(&a), U256::from(0)); + assert_eq!(state.balance(&a).unwrap(), U256::from(0)); } #[test] @@ -1970,14 +1970,14 @@ mod tests { let mut state = state.reference().clone(); let a: Address = 0xa.into(); - state.init_code(&a, b"abcdefg".to_vec()); - state.add_balance(&a, &256.into(), CleanupMode::NoEmpty); - state.set_storage(&a, 0xb.into(), 0xc.into()); + state.init_code(&a, b"abcdefg".to_vec()).unwrap();; + state.add_balance(&a, &256.into(), CleanupMode::NoEmpty).unwrap(); + state.set_storage(&a, 0xb.into(), 0xc.into()).unwrap(); let mut 
new_state = state.clone(); - new_state.set_storage(&a, 0xb.into(), 0xd.into()); + new_state.set_storage(&a, 0xb.into(), 0xd.into()).unwrap(); - new_state.diff_from(state); + new_state.diff_from(state).unwrap(); } } diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index d37551231..809604b13 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -292,7 +292,7 @@ fn change_history_size() { for _ in 0..20 { let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]); - b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty); + b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap(); b.block_mut().fields_mut().state.commit().unwrap(); let b = b.close_and_lock().seal(&*test_spec.engine, vec![]).unwrap(); client.import_sealed_block(b).unwrap(); // account change is in the journal overlay @@ -307,7 +307,7 @@ fn change_history_size() { Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected(), ).unwrap(); - assert_eq!(client.state().balance(&address), 100.into()); + assert_eq!(client.state().balance(&address).unwrap(), 100.into()); } #[test] From f169c8dbb05789abafb252c6fb71a6aad34c12f1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 19:17:05 +0100 Subject: [PATCH 03/91] fix remainder of build --- evmbin/src/ext.rs | 23 ++++++++-------- rpc/src/v1/helpers/errors.rs | 5 ++++ rpc/src/v1/impls/eth.rs | 32 ++++++++++++++++++++--- rpc/src/v1/tests/eth.rs | 1 - rpc/src/v1/tests/helpers/miner_service.rs | 29 ++++++++++++++------ 5 files changed, 66 insertions(+), 24 deletions(-) diff --git a/evmbin/src/ext.rs b/evmbin/src/ext.rs index 6492f4fdc..bcce9adc1 100644 --- a/evmbin/src/ext.rs +++ b/evmbin/src/ext.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::collections::HashMap; -use util::{U256, H256, Address, Bytes, FixedHash}; +use util::{U256, H256, Address, Bytes, FixedHash, trie}; use 
ethcore::client::EnvInfo; use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule, CallType}; @@ -39,27 +39,28 @@ impl Default for FakeExt { } impl Ext for FakeExt { - fn storage_at(&self, key: &H256) -> H256 { - self.store.get(key).unwrap_or(&H256::new()).clone() + fn storage_at(&self, key: &H256) -> trie::Result { + Ok(self.store.get(key).unwrap_or(&H256::new()).clone()) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.store.insert(key, value); + Ok(()) } - fn exists(&self, _address: &Address) -> bool { + fn exists(&self, _address: &Address) -> trie::Result { unimplemented!(); } - fn exists_and_not_null(&self, _address: &Address) -> bool { + fn exists_and_not_null(&self, _address: &Address) -> trie::Result { unimplemented!(); } - fn origin_balance(&self) -> U256 { + fn origin_balance(&self) -> trie::Result { unimplemented!(); } - fn balance(&self, _address: &Address) -> U256 { + fn balance(&self, _address: &Address) -> trie::Result { unimplemented!(); } @@ -83,11 +84,11 @@ impl Ext for FakeExt { unimplemented!(); } - fn extcode(&self, _address: &Address) -> Arc { + fn extcode(&self, _address: &Address) -> trie::Result> { unimplemented!(); } - fn extcodesize(&self, _address: &Address) -> usize { + fn extcodesize(&self, _address: &Address) -> trie::Result { unimplemented!(); } @@ -99,7 +100,7 @@ impl Ext for FakeExt { Ok(*gas) } - fn suicide(&mut self, _refund_address: &Address) { + fn suicide(&mut self, _refund_address: &Address) -> trie::Result<()> { unimplemented!(); } diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 93d23b1aa..7b0891246 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -124,6 +124,10 @@ pub fn state_pruned() -> Error { } } +pub fn state_corrupt() -> Error { + internal("State corrupt", "") +} + pub fn exceptional() -> Error { Error { code: 
ErrorCode::ServerError(codes::EXCEPTION_ERROR), @@ -288,6 +292,7 @@ pub fn from_rlp_error(error: DecoderError) -> Error { pub fn from_call_error(error: CallError) -> Error { match error { CallError::StatePruned => state_pruned(), + CallError::StateCorrupt => state_corrupt(), CallError::Exceptional => exceptional(), CallError::Execution(e) => execution(e), CallError::TransactionNotFound => internal("{}, this should not be the case with eth_call, most likely a bug.", CallError::TransactionNotFound), diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 01627ba28..f47ab2055 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -349,7 +349,13 @@ impl Eth for EthClient where let address = address.into(); let res = match num.0.clone() { - BlockNumber::Pending => Ok(take_weakf!(self.miner).balance(&*take_weakf!(self.client), &address).into()), + BlockNumber::Pending => { + let client = take_weakf!(self.client); + match take_weakf!(self.miner).balance(&*client, &address) { + Some(balance) => Ok(balance.into()), + None => Err(errors::internal("Unable to load balance from database", "")) + } + } id => { let client = take_weakf!(self.client); @@ -369,7 +375,13 @@ impl Eth for EthClient where let position: U256 = RpcU256::into(pos); let res = match num.0.clone() { - BlockNumber::Pending => Ok(take_weakf!(self.miner).storage_at(&*take_weakf!(self.client), &address, &H256::from(position)).into()), + BlockNumber::Pending => { + let client = take_weakf!(self.client); + match take_weakf!(self.miner).storage_at(&*client, &address, &H256::from(position)) { + Some(s) => Ok(s.into()), + None => Err(errors::internal("Unable to load storage from database", "")) + } + } id => { let client = take_weakf!(self.client); @@ -387,7 +399,13 @@ impl Eth for EthClient where fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture { let address: Address = RpcH160::into(address); let res = match num.0.clone() { - BlockNumber::Pending => 
Ok(take_weakf!(self.miner).nonce(&*take_weakf!(self.client), &address).into()), + BlockNumber::Pending => { + let client = take_weakf!(self.client); + match take_weakf!(self.miner).nonce(&*client, &address) { + Some(nonce) => Ok(nonce.into()), + None => Err(errors::internal("Unable to load nonce from database", "")) + } + } id => { let client = take_weakf!(self.client); @@ -437,7 +455,13 @@ impl Eth for EthClient where let address: Address = RpcH160::into(address); let res = match num.0.clone() { - BlockNumber::Pending => Ok(take_weakf!(self.miner).code(&*take_weakf!(self.client), &address).map_or_else(Bytes::default, Bytes::new)), + BlockNumber::Pending => { + let client = take_weakf!(self.client); + match take_weakf!(self.miner).code(&*client, &address) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::internal("Unable to load code from database", "")) + } + } id => { let client = take_weakf!(self.client); diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 6b937d733..c505a2d5d 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -18,7 +18,6 @@ use std::sync::Arc; use std::time::Duration; -use devtools::RandomTempPath; use ethcore::client::{BlockChainClient, Client, ClientConfig}; use ethcore::ids::BlockId; use ethcore::spec::{Genesis, Spec}; diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 75ca928b4..01dd9edc7 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -254,26 +254,39 @@ impl MinerService for TestMinerService { unimplemented!(); } - fn balance(&self, _chain: &MiningBlockChainClient, address: &Address) -> U256 { - self.latest_closed_block.lock().as_ref().map_or_else(U256::zero, |b| b.block().fields().state.balance(address).clone()) + fn balance(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { + self.latest_closed_block.lock() + .as_ref() + .map(|b| 
b.block().fields().state.balance(address)) + .map(|b| b.ok()) + .unwrap_or(Some(U256::default())) } fn call(&self, _chain: &MiningBlockChainClient, _t: &SignedTransaction, _analytics: CallAnalytics) -> Result { unimplemented!(); } - fn storage_at(&self, _chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { - self.latest_closed_block.lock().as_ref().map_or_else(H256::default, |b| b.block().fields().state.storage_at(address, position).clone()) + fn storage_at(&self, _chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { + self.latest_closed_block.lock() + .as_ref() + .map(|b| b.block().fields().state.storage_at(address, position)) + .map(|s| s.ok()) + .unwrap_or(Some(H256::default())) } - fn nonce(&self, _chain: &MiningBlockChainClient, address: &Address) -> U256 { + fn nonce(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { // we assume all transactions are in a pending block, ignoring the // reality of gas limits. - self.last_nonce(address).unwrap_or(U256::zero()) + Some(self.last_nonce(address).unwrap_or(U256::zero())) } - fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).map(|c| (*c).clone())) + fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option> { + self.latest_closed_block.lock() + .as_ref() + .map(|b| b.block().fields().state.code(address)) + .map(|c| c.ok()) + .unwrap_or(None) + .map(|c| c.map(|c| (&*c).clone())) } fn sensible_gas_price(&self) -> U256 { From ddbdfafc0525c7c464038c3baba3d29778dd34ed Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 23:10:29 +0100 Subject: [PATCH 04/91] buffer flow -> request credits --- ethcore/light/src/net/error.rs | 8 +- ethcore/light/src/net/mod.rs | 74 +++++++++---------- .../{buffer_flow.rs => request_credits.rs} | 74 +++++++++---------- ethcore/light/src/net/status.rs | 8 +- 
ethcore/light/src/net/tests/mod.rs | 28 +++---- sync/src/light_sync/tests/test_net.rs | 2 +- 6 files changed, 97 insertions(+), 97 deletions(-) rename ethcore/light/src/net/{buffer_flow.rs => request_credits.rs} (82%) diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index 627a7ef0f..dda78e0b6 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -44,8 +44,8 @@ pub enum Error { Rlp(DecoderError), /// A network error. Network(NetworkError), - /// Out of buffer. - BufferEmpty, + /// Out of credits. + NoCredits, /// Unrecognized packet code. UnrecognizedPacket(u8), /// Unexpected handshake. @@ -72,7 +72,7 @@ impl Error { match *self { Error::Rlp(_) => Punishment::Disable, Error::Network(_) => Punishment::None, - Error::BufferEmpty => Punishment::Disable, + Error::NoCredits => Punishment::Disable, Error::UnrecognizedPacket(_) => Punishment::Disconnect, Error::UnexpectedHandshake => Punishment::Disconnect, Error::WrongNetwork => Punishment::Disable, @@ -103,7 +103,7 @@ impl fmt::Display for Error { match *self { Error::Rlp(ref err) => err.fmt(f), Error::Network(ref err) => err.fmt(f), - Error::BufferEmpty => write!(f, "Out of buffer"), + Error::NoCredits => write!(f, "Out of request credits"), Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), Error::WrongNetwork => write!(f, "Wrong network"), diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 898934965..2ffaffa64 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -37,7 +37,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; use request::{self, HashOrNumber, Request}; -use self::buffer_flow::{Buffer, FlowParams}; +use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; use self::error::Punishment; use self::request_set::RequestSet; @@ -51,7 +51,7 @@ mod 
request_set; #[cfg(test)] mod tests; -pub mod buffer_flow; +pub mod request_credits; pub use self::error::Error; pub use self::context::{BasicContext, EventContext, IoContext}; @@ -143,10 +143,10 @@ struct PendingPeer { /// Relevant data to each peer. Not accessible publicly, only `pub` due to /// limitations of the privacy system. pub struct Peer { - local_buffer: Buffer, // their buffer relative to us + local_credits: Credits, // their credits relative to us status: Status, capabilities: Capabilities, - remote_flow: Option<(Buffer, FlowParams)>, + remote_flow: Option<(Credits, FlowParams)>, sent_head: H256, // last chain head we've given them. last_update: SteadyTime, pending_requests: RequestSet, @@ -155,21 +155,21 @@ pub struct Peer { impl Peer { // check the maximum cost of a request, returning an error if there's - // not enough buffer left. + // not enough credits left. // returns the calculated maximum cost. fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result { - flow_params.recharge(&mut self.local_buffer); + flow_params.recharge(&mut self.local_credits); let max_cost = flow_params.compute_cost(kind, max); - self.local_buffer.deduct_cost(max_cost)?; + self.local_credits.deduct_cost(max_cost)?; Ok(max_cost) } - // refund buffer for a request. returns new buffer amount. + // refund credits for a request. returns new amount of credits. fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { - flow_params.refund(&mut self.local_buffer, amount); + flow_params.refund(&mut self.local_credits, amount); - self.local_buffer.current() + self.local_credits.current() } } @@ -218,7 +218,7 @@ pub trait Handler: Send + Sync { pub struct Params { /// Network id. pub network_id: u64, - /// Buffer flow parameters. + /// Request credits parameters. pub flow_params: FlowParams, /// Initial capabilities. 
pub capabilities: Capabilities, @@ -324,14 +324,14 @@ impl LightProtocol { /// Check the maximum amount of requests of a specific type /// which a peer would be able to serve. Returns zero if the - /// peer is unknown or has no buffer flow parameters. + /// peer is unknown or has no credit parameters. fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { self.peers.read().get(&peer).and_then(|peer| { let mut peer = peer.lock(); match peer.remote_flow { - Some((ref mut buf, ref flow)) => { - flow.recharge(buf); - Some(flow.max_amount(&*buf, kind)) + Some((ref mut c, ref flow)) => { + flow.recharge(c); + Some(flow.max_amount(&*c, kind)) } None => None, } @@ -341,7 +341,7 @@ impl LightProtocol { /// Make a request to a peer. /// /// Fails on: nonexistent peer, network error, peer not server, - /// insufficient buffer. Does not check capabilities before sending. + /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. 
pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { @@ -350,10 +350,10 @@ impl LightProtocol { let mut peer = peer.lock(); match peer.remote_flow { - Some((ref mut buf, ref flow)) => { - flow.recharge(buf); + Some((ref mut c, ref flow)) => { + flow.recharge(c); let max = flow.compute_cost(request.kind(), request.amount()); - buf.deduct_cost(max)?; + c.deduct_cost(max)?; } None => return Err(Error::NotServer), } @@ -454,7 +454,7 @@ impl LightProtocol { // - check whether request kinds match fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { let req_id = ReqId(raw.val_at(0)?); - let cur_buffer: U256 = raw.val_at(1)?; + let cur_credits: U256 = raw.val_at(1)?; trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); @@ -470,9 +470,9 @@ impl LightProtocol { (Some(request), Some(flow_info)) => { had_req = true; - let &mut (ref mut buf, ref mut flow) = flow_info; - let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); - buf.update_to(actual_buffer); + let &mut (ref mut c, ref mut flow) = flow_info; + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); if request.kind() != kind { Some(Error::UnsolicitedResponse) @@ -675,10 +675,10 @@ impl LightProtocol { return Err(Error::BadProtocolVersion); } - let remote_flow = flow_params.map(|params| (params.create_buffer(), params)); + let remote_flow = flow_params.map(|params| (params.create_credits(), params)); self.peers.write().insert(*peer, Mutex::new(Peer { - local_buffer: self.flow_params.create_buffer(), + local_credits: self.flow_params.create_credits(), status: status.clone(), capabilities: capabilities.clone(), remote_flow: remote_flow, @@ -783,10 +783,10 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let 
cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::BLOCK_HEADERS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for header in response { stream.append_raw(&header.into_inner(), 1); @@ -845,11 +845,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::BLOCK_BODIES, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for body in response { match body { @@ -911,11 +911,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::RECEIPTS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for receipts in response { stream.append_raw(&receipts, 1); @@ -985,11 +985,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let 
cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::PROOFS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for proof in response { stream.append_raw(&proof, 1); @@ -1057,11 +1057,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::CONTRACT_CODES, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for code in response { stream.append(&code); @@ -1130,11 +1130,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::HEADER_PROOFS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for proof in response { stream.append_raw(&proof, 1); diff --git a/ethcore/light/src/net/buffer_flow.rs b/ethcore/light/src/net/request_credits.rs similarity index 82% rename from ethcore/light/src/net/buffer_flow.rs rename to ethcore/light/src/net/request_credits.rs index cce54da59..4f5d79504 100644 --- a/ethcore/light/src/net/buffer_flow.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -14,14 +14,14 @@ // You should have 
received a copy of the GNU General Public License // along with Parity. If not, see . -//! LES buffer flow management. +//! Request credit management. //! -//! Every request in the LES protocol leads to a reduction -//! of the requester's buffer value as a rate-limiting mechanism. -//! This buffer value will recharge at a set rate. +//! Every request in the light protocol leads to a reduction +//! of the requester's amount of credits as a rate-limiting mechanism. +//! The amount of credits will recharge at a set rate. //! -//! This module provides an interface for configuration of buffer -//! flow costs and recharge rates. +//! This module provides an interface for configuration of +//! costs and recharge rates of request credits. //! //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. @@ -38,19 +38,19 @@ use time::{Duration, SteadyTime}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct Cost(pub U256, pub U256); -/// Buffer value. +/// Credits value. /// /// Produced and recharged using `FlowParams`. /// Definitive updates can be made as well -- these will reset the recharge /// point to the time of the update. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Buffer { +pub struct Credits { estimate: U256, recharge_point: SteadyTime, } -impl Buffer { - /// Get the current buffer value. +impl Credits { + /// Get the current amount of credits.. pub fn current(&self) -> U256 { self.estimate.clone() } /// Make a definitive update. @@ -61,7 +61,7 @@ impl Buffer { self.recharge_point = SteadyTime::now(); } - /// Attempt to apply the given cost to the buffer. + /// Attempt to apply the given cost to the amount of credits. /// /// If successful, the cost will be deducted successfully. /// @@ -69,7 +69,7 @@ impl Buffer { /// error will be produced. 
pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> { match cost > self.estimate { - true => Err(Error::BufferEmpty), + true => Err(Error::NoCredits), false => { self.estimate = self.estimate - cost; Ok(()) @@ -165,7 +165,7 @@ impl RlpDecodable for CostTable { } } -/// A buffer-flow manager handles costs, recharge, limits +/// Handles costs, recharge, limits of request credits. #[derive(Debug, Clone, PartialEq)] pub struct FlowParams { costs: CostTable, @@ -175,7 +175,7 @@ pub struct FlowParams { impl FlowParams { /// Create new flow parameters from a request cost table, - /// buffer limit, and (minimum) rate of recharge. + /// credit limit, and (minimum) rate of recharge. pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self { FlowParams { costs: costs, @@ -201,7 +201,7 @@ impl FlowParams { } } - /// Get a reference to the buffer limit. + /// Get a reference to the credit limit. pub fn limit(&self) -> &U256 { &self.limit } /// Get a reference to the cost table. @@ -227,10 +227,10 @@ impl FlowParams { } /// Compute the maximum number of costs of a specific kind which can be made - /// with the given buffer. + /// with the given amount of credits /// Saturates at `usize::max()`. This is not a problem in practice because /// this amount of requests is already prohibitively large. - pub fn max_amount(&self, buffer: &Buffer, kind: request::Kind) -> usize { + pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize { use util::Uint; use std::usize; @@ -243,7 +243,7 @@ impl FlowParams { request::Kind::HeaderProofs => &self.costs.header_proofs, }; - let start = buffer.current(); + let start = credits.current(); if start <= cost.0 { return 0; @@ -259,36 +259,36 @@ impl FlowParams { } } - /// Create initial buffer parameter. - pub fn create_buffer(&self) -> Buffer { - Buffer { + /// Create initial credits.. 
+ pub fn create_credits(&self) -> Credits { + Credits { estimate: self.limit, recharge_point: SteadyTime::now(), } } - /// Recharge the buffer based on time passed since last + /// Recharge the given credits based on time passed since last /// update. - pub fn recharge(&self, buf: &mut Buffer) { + pub fn recharge(&self, credits: &mut Credits) { let now = SteadyTime::now(); // recompute and update only in terms of full seconds elapsed // in order to keep the estimate as an underestimate. - let elapsed = (now - buf.recharge_point).num_seconds(); - buf.recharge_point = buf.recharge_point + Duration::seconds(elapsed); + let elapsed = (now - credits.recharge_point).num_seconds(); + credits.recharge_point = credits.recharge_point + Duration::seconds(elapsed); let elapsed: U256 = elapsed.into(); - buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge)); + credits.estimate = ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge)); } - /// Refund some buffer which was previously deducted. + /// Refund some credits which were previously deducted. /// Does not update the recharge timestamp. 
- pub fn refund(&self, buf: &mut Buffer, refund_amount: U256) { - buf.estimate = buf.estimate + refund_amount; + pub fn refund(&self, credits: &mut Credits, refund_amount: U256) { + credits.estimate = credits.estimate + refund_amount; - if buf.estimate > self.limit { - buf.estimate = self.limit + if credits.estimate > self.limit { + credits.estimate = self.limit } } } @@ -318,20 +318,20 @@ mod tests { } #[test] - fn buffer_mechanism() { + fn credits_mechanism() { use std::thread; use std::time::Duration; let flow_params = FlowParams::new(100.into(), Default::default(), 20.into()); - let mut buffer = flow_params.create_buffer(); + let mut credits = flow_params.create_credits(); - assert!(buffer.deduct_cost(101.into()).is_err()); - assert!(buffer.deduct_cost(10.into()).is_ok()); + assert!(credits.deduct_cost(101.into()).is_err()); + assert!(credits.deduct_cost(10.into()).is_ok()); thread::sleep(Duration::from_secs(1)); - flow_params.recharge(&mut buffer); + flow_params.recharge(&mut credits); - assert_eq!(buffer.estimate, 100.into()); + assert_eq!(credits.estimate, 100.into()); } } diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 655dc404f..3e32f6609 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -19,7 +19,7 @@ use rlp::{DecoderError, RlpDecodable, RlpEncodable, RlpStream, Stream, UntrustedRlp, View}; use util::{H256, U256}; -use super::buffer_flow::FlowParams; +use super::request_credits::FlowParams; // recognized handshake/announcement keys. // unknown keys are to be skipped, known keys have a defined order. 
@@ -207,7 +207,7 @@ impl Capabilities { /// Attempt to parse a handshake message into its three parts: /// - chain status /// - serving capabilities -/// - buffer flow parameters +/// - request credit parameters pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, Option), DecoderError> { let mut parser = Parser { pos: 0, @@ -300,7 +300,7 @@ pub struct Announcement { pub serve_chain_since: Option, /// optional new transaction-relay capability. false means "no change" pub tx_relay: bool, - // TODO: changes in buffer flow? + // TODO: changes in request credits. } /// Parse an announcement. @@ -372,7 +372,7 @@ pub fn write_announcement(announcement: &Announcement) -> Vec { #[cfg(test)] mod tests { use super::*; - use super::super::buffer_flow::FlowParams; + use super::super::request_credits::FlowParams; use util::{U256, H256, FixedHash}; use rlp::{RlpStream, Stream ,UntrustedRlp, View}; diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 47d73aef2..4efa6f680 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -24,7 +24,7 @@ use ethcore::transaction::PendingTransaction; use ethcore::encoded; use network::{PeerId, NodeId}; -use net::buffer_flow::FlowParams; +use net::request_credits::FlowParams; use net::context::IoContext; use net::status::{Capabilities, Status, write_handshake}; use net::{encode_request, LightProtocol, Params, packet, Peer}; @@ -203,7 +203,7 @@ fn genesis_mismatch() { } #[test] -fn buffer_overflow() { +fn credit_overflow() { let flow_params = make_flow_params(); let capabilities = capabilities(); @@ -268,11 +268,11 @@ fn get_block_headers() { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); + let new_creds = *flow_params.limit() - 
flow_params.compute_cost(request::Kind::Headers, 10); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(10); + response_stream.append(&req_id).append(&new_creds).begin_list(10); for header in headers { response_stream.append_raw(&header.into_inner(), 1); } @@ -317,11 +317,11 @@ fn get_block_bodies() { let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(bodies.len(), 10); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(10); + response_stream.append(&req_id).append(&new_creds).begin_list(10); for body in bodies { response_stream.append_raw(&body.into_inner(), 1); } @@ -371,11 +371,11 @@ fn get_block_receipts() { .map(|hash| provider.client.block_receipts(hash).unwrap()) .collect(); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(receipts.len()); + response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len()); for block_receipts in receipts { response_stream.append_raw(&block_receipts, 1); } @@ -420,11 +420,11 @@ fn get_state_proofs() { vec![::util::sha3::SHA3_NULL_RLP.to_vec()], ]; - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(2); + 
response_stream.append(&req_id).append(&new_creds).begin_list(2); for proof in proofs { response_stream.begin_list(proof.len()); for node in proof { @@ -472,11 +472,11 @@ fn get_contract_code() { key2.iter().chain(key2.iter()).cloned().collect(), ]; - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(2); + response_stream.append(&req_id).append(&new_creds).begin_list(2); for code in codes { response_stream.append(&code); } @@ -515,10 +515,10 @@ fn id_guard() { pending_requests.insert(req_id_2, req, ::time::SteadyTime::now()); proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { - local_buffer: flow_params.create_buffer(), + local_credits: flow_params.create_credits(), status: status(provider.client.chain_info()), capabilities: capabilities.clone(), - remote_flow: Some((flow_params.create_buffer(), flow_params)), + remote_flow: Some((flow_params.create_credits(), flow_params)), sent_head: provider.client.chain_info().best_block_hash, last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index b73da48bb..d0e472374 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -27,7 +27,7 @@ use ethcore::spec::Spec; use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; -use light::net::buffer_flow::FlowParams; +use light::net::request_credits::FlowParams; use network::{NodeId, PeerId}; use util::RwLock; From ee7779df171e69fe2eb5f022b9f8e119cb55aa84 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 24 Feb 2017 20:16:32 +0100 Subject: [PATCH 05/91] proving state backend --- 
ethcore/src/state/backend.rs | 94 +++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 81a770fe7..dfb4465fa 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -21,10 +21,12 @@ //! should become general over time to the point where not even a //! merkle trie is strictly necessary. +use std::collections::{HashSet, HashMap}; use std::sync::Arc; use state::Account; -use util::{Address, AsHashDB, HashDB, H256}; +use util::{Address, MemoryDB, Mutex, H256}; +use util::hashdb::{AsHashDB, HashDB, DBValue}; /// State backend. See module docs for more details. pub trait Backend: Send { @@ -91,3 +93,93 @@ impl Backend for NoCache { fn note_non_null_account(&self, _address: &Address) {} fn is_known_null(&self, _address: &Address) -> bool { false } } + +/// Proving state backend. +/// See module docs for more details. +/// +/// This doesn't cache anything or rely on the canonical state caches. +#[derive(Debug, Clone, PartialEq)] +pub struct Proving { + base: H, // state we're proving values from. + changed: MemoryDB, // changed state via insertions. 
+ proof: Mutex>, +} + +impl HashDB for Proving { + fn keys(&self) -> HashMap { + self.base.as_hashdb().keys() + .extend(self.changed.keys()) + } + + fn get(&self, key: &H256) -> Option { + match self.base.as_hashdb().get(key) { + Some(val) => { + self.proof.lock().insert(val.clone()); + Some(val) + } + None => self.changed.get(key) + } + } + + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.changed.insert(value) + } + + fn emplace(&mut self, key: H256, value: DBValue) { + self.changed.emplace(key, value) + } + + fn remove(&mut self, key: &H256) { + // only remove from `changed` + if self.changed.contains(key) { + self.changed.remove(key) + } + } +} + +impl Backend for Proving { + fn as_hashdb(&self) -> &HashDB { + self + } + + fn as_hashdb_mut(&mut self) -> &mut HashDB { + self + } + + fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } + + fn cache_code(&self, _: H256, _: Arc>) { } + + fn get_cached_account(&self, _: &Address) -> Option> { None } + + fn get_cached(&self, _: &Address, _: F) -> Option + where F: FnOnce(Option<&mut Account>) -> U + { + None + } + + fn get_cached_code(&self, _: &H256) -> Option>> { None } + fn note_non_null_account(&self, _: &Address) { } + fn is_known_null(&self, _: &Address) -> bool { false } +} + +impl Proving { + /// Create a new `Proving` over a base database. + /// This will store all values ever fetched from that base. + pub fn new(base: H) -> Self { + Proving { + base: base, + changed: MemoryDB::new(), + proof: Mutex::new(HashSet::new()), + } + } + + /// Consume the backend, extracting the gathered proof. 
+ pub fn extract_proof(self) -> Vec { + self.proof.into_inner().into_iter().collect() + } +} From 92e5982127548aed46b92354bde2c4916e0352a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 00:27:48 +0100 Subject: [PATCH 06/91] generate transaction proofs from provider --- ethcore/light/src/client/mod.rs | 6 +- ethcore/light/src/net/mod.rs | 29 +++++++- ethcore/light/src/net/request_credits.rs | 10 ++- ethcore/light/src/net/request_set.rs | 1 + ethcore/light/src/provider.rs | 30 +++++++- ethcore/light/src/types/les_request.rs | 33 ++++++++- ethcore/src/client/client.rs | 92 ++++++++++++------------ ethcore/src/client/test_client.rs | 4 ++ ethcore/src/client/traits.rs | 4 ++ ethcore/src/state/backend.rs | 18 +++-- ethcore/src/state/mod.rs | 13 ++++ util/src/hashdb.rs | 10 +++ 12 files changed, 195 insertions(+), 55 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index a113b4367..9626f9f6c 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{Bytes, H256, Mutex, RwLock}; +use util::{Bytes, DBValue, H256, Mutex, RwLock}; use self::header_chain::HeaderChain; @@ -293,6 +293,10 @@ impl ::provider::Provider for Client { None } + fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> { Vec::new() } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 2ffaffa64..ad1eaac00 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -19,7 +19,7 @@ //! This uses a "Provider" to answer requests. //! 
See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) -use ethcore::transaction::UnverifiedTransaction; +use ethcore::transaction::{Action, UnverifiedTransaction}; use ethcore::receipt::Receipt; use io::TimerToken; @@ -73,7 +73,7 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; pub const MAX_PROTOCOL_VERSION: u8 = 1; /// Packet count for LES. -pub const PACKET_COUNT: u8 = 15; +pub const PACKET_COUNT: u8 = 17; // packet ID definitions. mod packet { @@ -109,6 +109,10 @@ mod packet { // request and response for header proofs in a CHT. pub const GET_HEADER_PROOFS: u8 = 0x0d; pub const HEADER_PROOFS: u8 = 0x0e; + + // request and response for transaction proof. + pub const GET_TRANSACTION_PROOF: u8 = 0x0f; + pub const TRANSACTION_PROOF: u8 = 0x10; } // timeouts for different kinds of requests. all values are in milliseconds. @@ -121,6 +125,7 @@ mod timeout { pub const PROOFS: i64 = 4000; pub const CONTRACT_CODES: i64 = 5000; pub const HEADER_PROOFS: i64 = 3500; + pub const TRANSACTION_PROOF: i64 = 5000; } /// A request id. 
@@ -370,6 +375,7 @@ impl LightProtocol { request::Kind::StateProofs => packet::GET_PROOFS, request::Kind::Codes => packet::GET_CONTRACT_CODES, request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, + request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF, }; io.send(*peer_id, packet_id, packet_data); @@ -1320,6 +1326,25 @@ fn encode_request(req: &Request, req_id: usize) -> Vec { .append(&proof_req.from_level); } + stream.out() + } + Request::TransactionProof(ref request) => { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).begin_list(7) + .append(&request.at) + .append(&request.from); + + match request.action { + Action::Create => stream.append_empty_data(), + Action::Call(ref to) => stream.append(to), + }; + + stream + .append(&request.gas) + .append(&request.gas_price) + .append(&request.value) + .append(&request.data); + stream.out() } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 4f5d79504..3a1cb9996 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -81,12 +81,13 @@ impl Credits { /// A cost table, mapping requests to base and per-request costs. #[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - headers: Cost, + headers: Cost, // cost per header bodies: Cost, receipts: Cost, state_proofs: Cost, contract_codes: Cost, header_proofs: Cost, + transaction_proof: Cost, // cost per gas. 
} impl Default for CostTable { @@ -99,6 +100,7 @@ impl Default for CostTable { state_proofs: Cost(250000.into(), 25000.into()), contract_codes: Cost(200000.into(), 20000.into()), header_proofs: Cost(150000.into(), 15000.into()), + transaction_proof: Cost(100000.into(), 2.into()), } } } @@ -133,6 +135,7 @@ impl RlpDecodable for CostTable { let mut state_proofs = None; let mut contract_codes = None; let mut header_proofs = None; + let mut transaction_proof = None; for row in rlp.iter() { let msg_id: u8 = row.val_at(0)?; @@ -150,6 +153,7 @@ impl RlpDecodable for CostTable { packet::GET_PROOFS => state_proofs = Some(cost), packet::GET_CONTRACT_CODES => contract_codes = Some(cost), packet::GET_HEADER_PROOFS => header_proofs = Some(cost), + packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost), _ => return Err(DecoderError::Custom("Unrecognized message in cost table")), } } @@ -161,6 +165,7 @@ impl RlpDecodable for CostTable { state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?, contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?, header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?, + transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?, }) } } @@ -197,6 +202,7 @@ impl FlowParams { state_proofs: free_cost.clone(), contract_codes: free_cost.clone(), header_proofs: free_cost.clone(), + transaction_proof: free_cost, } } } @@ -220,6 +226,7 @@ impl FlowParams { request::Kind::StateProofs => &self.costs.state_proofs, request::Kind::Codes => &self.costs.contract_codes, request::Kind::HeaderProofs => &self.costs.header_proofs, + request::Kind::TransactionProof => &self.costs.transaction_proof, }; let amount: U256 = amount.into(); @@ -241,6 +248,7 @@ impl FlowParams { request::Kind::StateProofs => &self.costs.state_proofs, request::Kind::Codes => &self.costs.contract_codes, request::Kind::HeaderProofs => 
&self.costs.header_proofs, + request::Kind::TransactionProof => &self.costs.transaction_proof, }; let start = credits.current(); diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index c9f278776..57eb232cc 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -101,6 +101,7 @@ impl RequestSet { request::Kind::StateProofs => timeout::PROOFS, request::Kind::Codes => timeout::CONTRACT_CODES, request::Kind::HeaderProofs => timeout::HEADER_PROOFS, + request::Kind::TransactionProof => timeout::TRANSACTION_PROOF, }; base + Duration::milliseconds(kind_timeout) <= now diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index caade3857..2ef7f1f04 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,7 +24,7 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{Bytes, RwLock, H256}; +use util::{Bytes, DBValue, RwLock, H256}; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; @@ -193,6 +193,10 @@ pub trait Provider: Send + Sync { /// Provide pending transactions. fn ready_transactions(&self) -> Vec; + + /// Provide a proof-of-execution for the given transaction proof request. + /// Returns a vector of all state items necessary to execute the transaction. + fn transaction_proof(&self, req: request::TransactionProof) -> Option>; } // Implementation of a light client data provider for a client. 
@@ -283,6 +287,26 @@ impl Provider for T { } } + fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + use ethcore::transaction::Transaction; + + let id = BlockId::Hash(req.at); + let nonce = match self.nonce(&req.from, id.clone()) { + Some(nonce) => nonce, + None => return None, + }; + let transaction = Transaction { + nonce: nonce, + gas: req.gas, + gas_price: req.gas_price, + action: req.action, + value: req.value, + data: req.data, + }.fake_sign(req.from); + + self.prove_transaction(transaction, id) + } + fn ready_transactions(&self) -> Vec { BlockChainClient::ready_transactions(self) } @@ -343,6 +367,10 @@ impl Provider for LightProvider { None } + fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec { let chain_info = self.chain_info(); self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs index b4940980e..dbff19eb5 100644 --- a/ethcore/light/src/types/les_request.rs +++ b/ethcore/light/src/types/les_request.rs @@ -16,7 +16,8 @@ //! LES request types. -use util::H256; +use ethcore::transaction::Action; +use util::{Address, H256, U256, Uint}; /// Either a hash or a number. #[derive(Debug, Clone, PartialEq, Eq)] @@ -134,6 +135,26 @@ pub struct HeaderProofs { pub requests: Vec, } +/// A request for proof of (simulated) transaction execution. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", binary)] +pub struct TransactionProof { + /// Block hash to request for. + pub at: H256, + /// Address to treat as the caller. + pub from: Address, + /// Action to take: either a call or a create. + pub action: Action, + /// Amount of gas to request proof-of-execution for. + pub gas: U256, + /// Price for each gas. + pub gas_price: U256, + /// Value to simulate sending. + pub value: U256, + /// Transaction data. 
+ pub data: Vec, +} + /// Kinds of requests. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "ipc", binary)] @@ -150,6 +171,8 @@ pub enum Kind { Codes, /// Requesting header proofs (from the CHT). HeaderProofs, + /// Requesting proof of transaction execution. + TransactionProof, } /// Encompasses all possible types of requests in a single structure. @@ -168,6 +191,8 @@ pub enum Request { Codes(ContractCodes), /// Requesting header proofs. HeaderProofs(HeaderProofs), + /// Requesting proof of transaction execution. + TransactionProof(TransactionProof), } impl Request { @@ -180,10 +205,12 @@ impl Request { Request::StateProofs(_) => Kind::StateProofs, Request::Codes(_) => Kind::Codes, Request::HeaderProofs(_) => Kind::HeaderProofs, + Request::TransactionProof(_) => Kind::TransactionProof, } } /// Get the amount of requests being made. + /// In the case of `TransactionProof`, this is the amount of gas being requested. pub fn amount(&self) -> usize { match *self { Request::Headers(ref req) => req.max, @@ -192,6 +219,10 @@ impl Request { Request::StateProofs(ref req) => req.requests.len(), Request::Codes(ref req) => req.code_requests.len(), Request::HeaderProofs(ref req) => req.requests.len(), + Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() { + true => usize::max_value(), + false => req.gas.low_u64() as usize, + } } } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 7f209bad1..d8849ded3 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -24,7 +24,7 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable}; -use util::{journaldb, TrieFactory, Trie}; +use util::{journaldb, DBValue, TrieFactory, Trie}; use util::{U256, H256, Address, H2048, Uint, FixedHash}; use util::trie::TrieSpec; use util::kvdb::*; @@ -34,7 +34,7 @@ use io::*; use views::BlockView; use error::{ImportError, ExecutionError, CallError, 
BlockError, ImportResult, Error as EthcoreError}; use header::BlockNumber; -use state::{State, CleanupMode}; +use state::{self, State, CleanupMode}; use spec::Spec; use basic_types::Seal; use engines::Engine; @@ -309,17 +309,23 @@ impl Client { /// The env info as of the best block. fn latest_env_info(&self) -> EnvInfo { - let header = self.best_block_header(); + self.env_info(BlockId::Latest).expect("Best block header always stored; qed") + } - EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(header.hash()), - gas_used: U256::default(), - gas_limit: header.gas_limit(), - } + /// The env info as of a given block. + /// returns `None` if the block unknown. + fn env_info(&self, id: BlockId) -> Option { + self.block_header(id).map(|header| { + EnvInfo { + number: header.number(), + author: header.author(), + timestamp: header.timestamp(), + difficulty: header.difficulty(), + last_hashes: self.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: header.gas_limit(), + } + }) } fn build_last_hashes(&self, parent_hash: H256) -> Arc { @@ -874,17 +880,8 @@ impl snapshot::DatabaseRestore for Client { impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { - let header = self.block_header(block).ok_or(CallError::StatePruned)?; - let last_hashes = self.build_last_hashes(header.parent_hash()); - let env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: U256::max_value(), - }; + let env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + // that's just a copy of the state. 
let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; @@ -910,17 +907,13 @@ impl BlockChainClient for Client { fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result { const UPPER_CEILING: u64 = 1_000_000_000_000u64; - let header = self.block_header(block).ok_or(CallError::StatePruned)?; - let last_hashes = self.build_last_hashes(header.parent_hash()); - let env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: UPPER_CEILING.into(), + let (mut upper, env_info) = { + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + let initial_upper = env_info.gas_limit; + env_info.gas_limit = UPPER_CEILING.into(); + (initial_upper, env_info) }; + // that's just a copy of the state. let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let sender = t.sender(); @@ -946,7 +939,6 @@ impl BlockChainClient for Client { .unwrap_or(false)) }; - let mut upper = header.gas_limit(); if !cond(upper)? { // impossible at block gas limit - try `UPPER_CEILING` instead. // TODO: consider raising limit by powers of two. 
@@ -989,7 +981,7 @@ impl BlockChainClient for Client { fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?; - let header = self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; + let mut env_info = self.env_info(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut txs = body.transactions(); @@ -999,16 +991,6 @@ impl BlockChainClient for Client { } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let last_hashes = self.build_last_hashes(header.hash()); - let mut env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::default(), - gas_limit: header.gas_limit(), - }; const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed"; let rest = txs.split_off(address.index); for t in txs { @@ -1620,6 +1602,26 @@ impl ::client::ProvingBlockChainClient for Client { .and_then(|x| x) .unwrap_or_else(Vec::new) } + + fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { + let (state, env_info) = match (self.state_at(id), self.env_info(id)) { + (Some(s), Some(e)) => (s, e), + _ => return None, + }; + let mut jdb = self.state_db.lock().journal_db().boxed_clone(); + let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); + + let mut state = state.replace_backend(backend); + let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false }; + let res = Executive::new(&mut state, &env_info, &*self.engine, 
&self.factories.vm).transact(&transaction, options); + + + match res { + Err(ExecutionError::Internal(_)) => return None, + _ => return Some(state.drop().1.extract_proof()), + } + } + } impl Drop for Client { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index dc9cb5944..5d436f4c5 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -765,6 +765,10 @@ impl ProvingBlockChainClient for TestBlockChainClient { fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { Vec::new() } + + fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option> { + None + } } impl EngineClient for TestBlockChainClient { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index dce708b3a..13abb33f9 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -16,6 +16,7 @@ use std::collections::BTreeMap; use util::{U256, Address, H256, H2048, Bytes, Itertools}; +use util::hashdb::DBValue; use util::stats::Histogram; use blockchain::TreeRoute; use verification::queue::QueueInfo as BlockQueueInfo; @@ -336,4 +337,7 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// Get code by address hash. fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; + + /// Prove execution of a transaction at the given block. + fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option>; } diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index dfb4465fa..041eb71b4 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -98,7 +98,6 @@ impl Backend for NoCache { /// See module docs for more details. /// /// This doesn't cache anything or rely on the canonical state caches. -#[derive(Debug, Clone, PartialEq)] pub struct Proving { base: H, // state we're proving values from. changed: MemoryDB, // changed state via insertions. 
@@ -107,8 +106,9 @@ pub struct Proving { impl HashDB for Proving { fn keys(&self) -> HashMap { - self.base.as_hashdb().keys() - .extend(self.changed.keys()) + let mut keys = self.base.as_hashdb().keys(); + keys.extend(self.changed.keys()); + keys } fn get(&self, key: &H256) -> Option { @@ -141,7 +141,7 @@ impl HashDB for Proving { } } -impl Backend for Proving { +impl Backend for Proving { fn as_hashdb(&self) -> &HashDB { self } @@ -183,3 +183,13 @@ impl Proving { self.proof.into_inner().into_iter().collect() } } + +impl Clone for Proving { + fn clone(&self) -> Self { + Proving { + base: self.base.clone(), + changed: self.changed.clone(), + proof: Mutex::new(self.proof.lock().clone()), + } + } +} diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index e25d7d404..ce711ffbd 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -264,6 +264,19 @@ impl State { Ok(state) } + /// Swap the current backend for another. + // TODO: [rob] find a less hacky way to avoid duplication of `Client::state_at`. + pub fn replace_backend(self, backend: T) -> State { + State { + db: backend, + root: self.root, + cache: self.cache, + checkpoints: self.checkpoints, + account_start_nonce: self.account_start_nonce, + factories: self.factories, + } + } + /// Create a recoverable checkpoint of this state. 
pub fn checkpoint(&mut self) { self.checkpoints.get_mut().push(HashMap::new()); diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 3b1939cae..8217413ef 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -125,3 +125,13 @@ impl AsHashDB for T { self } } + +impl<'a> AsHashDB for &'a mut HashDB { + fn as_hashdb(&self) -> &HashDB { + &**self + } + + fn as_hashdb_mut(&mut self) -> &mut HashDB { + &mut **self + } +} From 4158693470035f9a6577b65f428b13a6ae506f3e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 11:07:38 +0100 Subject: [PATCH 07/91] network messages for transaction proof --- ethcore/light/src/net/mod.rs | 88 +++++++++++++++++++++++- ethcore/light/src/net/request_credits.rs | 3 +- ethcore/light/src/net/tests/mod.rs | 6 +- ethcore/light/src/provider.rs | 2 +- 4 files changed, 95 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index ad1eaac00..6bb1cb227 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -26,7 +26,7 @@ use io::TimerToken; use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, Stream, UntrustedRlp, View}; use util::hash::H256; -use util::{Bytes, Mutex, RwLock, U256}; +use util::{Bytes, DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; use std::collections::HashMap; @@ -211,6 +211,8 @@ pub trait Handler: Send + Sync { /// Called when a peer responds with header proofs. Each proof should be a block header coupled /// with a series of trie nodes is ascending order by distance from the root. fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. + fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. fn tick(&self, _ctx: &BasicContext) { } /// Called on abort. 
This signals to handlers that they should clean up @@ -535,6 +537,9 @@ impl LightProtocol { packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), + packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp), + packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp), + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), other => { @@ -1178,6 +1183,87 @@ impl LightProtocol { Ok(()) } + // Receive a request for proof-of-execution. + fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + const MAX_GAS: usize = 10_000_000; // refuse to execute more than this amount of gas at once. + use util::Uint; + + let peers = self.peers.read(); + let peer = match peers.get(peer) { + Some(peer) => peer, + None => { + debug!(target: "les", "Ignoring request from unknown peer"); + return Ok(()) + } + }; + let mut peer = peer.lock(); + + let req_id: u64 = raw.val_at(0)?; + + let req = { + let req_rlp = raw.at(1)?; + request::TransactionProof { + at: req_rlp.val_at(0)?, + from: req_rlp.val_at(1)?, + action: if req_rlp.at(2)?.is_empty() { + Action::Create + } else { + Action::Call(req_rlp.val_at(2)?) + }, + gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()), + gas_price: req_rlp.val_at(4)?, + value: req_rlp.val_at(5)?, + data: req_rlp.val_at(6)?, + } + }; + + // always charge the peer for all the gas. 
+ peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?; + + let response = match self.provider.transaction_proof(req) { + Some(res) => res, + None => vec![], + }; + + let cur_credits = peer.local_credits.current(); + + io.respond(packet::TRANSACTION_PROOF, { + let mut stream = RlpStream::new_list(3); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); + + for state_item in response { + stream.append(&&state_item[..]); + } + + stream.out() + }); + + Ok(()) + } + + // Receive a response for proof-of-execution. + fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; + let raw_proof: Vec = raw.at(2)?.iter() + .map(|rlp| { + let mut db_val = DBValue::new(); + db_val.append_slice(rlp.data()?); + Ok(db_val) + }) + .collect::, ::rlp::DecoderError>>()?; + + let req_id = id_guard.defuse(); + for handler in &self.handlers { + handler.on_transaction_proof(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_proof); + } + + Ok(()) + } + // Receive a set of transactions to relay. 
fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_TRANSACTIONS: usize = 256; diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 3a1cb9996..97aa9b431 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -114,7 +114,7 @@ impl RlpEncodable for CostTable { .append(&cost.1); } - s.begin_list(6); + s.begin_list(7); append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); @@ -122,6 +122,7 @@ impl RlpEncodable for CostTable { append_cost(s, packet::GET_PROOFS, &self.state_proofs); append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); + append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof); } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 4efa6f680..8faba0b00 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -32,7 +32,7 @@ use provider::Provider; use request::{self, Request, Headers}; use rlp::*; -use util::{Bytes, H256, U256}; +use util::{Bytes, DBValue, H256, U256}; use std::sync::Arc; @@ -127,6 +127,10 @@ impl Provider for TestProvider { None } + fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec { self.0.client.ready_transactions() } diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 2ef7f1f04..3f55a6b99 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -367,7 +367,7 @@ impl Provider for LightProvider { None } - fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { None } From 32f906fe9fc4b3e5b5ef9af4e345525128153829 Mon Sep 17 00:00:00 2001 From: 
Robert Habermeier Date: Sat, 25 Feb 2017 11:54:32 +0100 Subject: [PATCH 08/91] transaction proof test --- ethcore/src/client/client.rs | 4 +-- ethcore/src/state/backend.rs | 54 +++++++++++++++++++++++++++--------- ethcore/src/tests/client.rs | 43 +++++++++++++++++++++++++++- 3 files changed, 85 insertions(+), 16 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index d8849ded3..ad61fd629 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -308,13 +308,13 @@ impl Client { } /// The env info as of the best block. - fn latest_env_info(&self) -> EnvInfo { + pub fn latest_env_info(&self) -> EnvInfo { self.env_info(BlockId::Latest).expect("Best block header always stored; qed") } /// The env info as of a given block. /// returns `None` if the block unknown. - fn env_info(&self, id: BlockId) -> Option { + pub fn env_info(&self, id: BlockId) -> Option { self.block_header(id).map(|header| { EnvInfo { number: header.number(), diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 041eb71b4..5ab620b0e 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -66,21 +66,48 @@ pub trait Backend: Send { fn is_known_null(&self, address: &Address) -> bool; } -/// A raw backend which simply wraps a hashdb and does no caching. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct NoCache(T); +/// A raw backend used to check proofs of execution. +/// +/// This doesn't delete anything since execution proofs won't have mangled keys +/// and we want to avoid collisions. +// TODO: when account lookup moved into backends, this won't rely as tenuously on intended +// usage. +#[derive(Clone, PartialEq)] +pub struct ProofCheck(MemoryDB); -impl NoCache { - /// Create a new `NoCache` backend. - pub fn new(inner: T) -> Self { NoCache(inner) } - - /// Consume the backend, yielding the inner database. 
- pub fn into_inner(self) -> T { self.0 } +impl ProofCheck { + /// Create a new `ProofCheck` backend from the given state items. + pub fn new(proof: &[DBValue]) -> Self { + let mut db = MemoryDB::new(); + for item in proof { db.insert(item); } + ProofCheck(db) + } } -impl Backend for NoCache { - fn as_hashdb(&self) -> &HashDB { self.0.as_hashdb() } - fn as_hashdb_mut(&mut self) -> &mut HashDB { self.0.as_hashdb_mut() } +impl HashDB for ProofCheck { + fn keys(&self) -> HashMap { self.0.keys() } + fn get(&self, key: &H256) -> Option { + self.0.get(key) + } + + fn contains(&self, key: &H256) -> bool { + self.0.contains(key) + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.0.insert(value) + } + + fn emplace(&mut self, key: H256, value: DBValue) { + self.0.emplace(key, value) + } + + fn remove(&mut self, _key: &H256) { } +} + +impl Backend for ProofCheck { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} fn cache_code(&self, _hash: H256, _code: Arc>) {} fn get_cached_account(&self, _addr: &Address) -> Option> { None } @@ -95,7 +122,8 @@ impl Backend for NoCache { } /// Proving state backend. -/// See module docs for more details. +/// This keeps track of all state values loaded during usage of this backend. +/// The proof-of-execution can be extracted with `extract_proof`. /// /// This doesn't cache anything or rely on the canonical state caches. 
pub struct Proving { diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 809604b13..ded70b363 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -16,7 +16,8 @@ use io::IoChannel; use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId}; -use state::CleanupMode; +use state::{self, State, CleanupMode}; +use executive::Executive; use ethereum; use block::IsBlock; use tests::helpers::*; @@ -342,3 +343,43 @@ fn does_not_propagate_delayed_transactions() { assert_eq!(2, client.ready_transactions().len()); assert_eq!(2, client.miner().pending_transactions().len()); } + +#[test] +fn transaction_proof() { + use ::client::ProvingBlockChainClient; + + let client_result = generate_dummy_client(0); + let client = client_result.reference(); + let address = Address::random(); + let test_spec = Spec::new_test(); + for _ in 0..20 { + let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]); + b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap(); + b.block_mut().fields_mut().state.commit().unwrap(); + let b = b.close_and_lock().seal(&*test_spec.engine, vec![]).unwrap(); + client.import_sealed_block(b).unwrap(); // account change is in the journal overlay + } + + let transaction = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 5.into(), + data: Vec::new(), + }.fake_sign(address); + + let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap(); + let backend = state::backend::ProofCheck::new(&proof); + + let mut factories = ::factory::Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys. 
+ let root = client.best_block_header().state_root(); + + let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap(); + Executive::new(&mut state, &client.latest_env_info(), &*test_spec.engine, &factories.vm) + .transact(&transaction, Default::default()).unwrap(); + + assert_eq!(state.balance(&Address::default()).unwrap(), 5.into()); + assert_eq!(state.balance(&address).unwrap(), 95.into()); +} From 2b671b847655c183cabe0441ecf83e87ee2c066d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 12:43:43 +0100 Subject: [PATCH 09/91] test for transaction proof message --- ethcore/light/src/net/tests/mod.rs | 54 ++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 8faba0b00..6a9de1467 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -20,7 +20,7 @@ use ethcore::blockchain_info::BlockChainInfo; use ethcore::client::{EachBlockWith, TestBlockChainClient}; use ethcore::ids::BlockId; -use ethcore::transaction::PendingTransaction; +use ethcore::transaction::{Action, PendingTransaction}; use ethcore::encoded; use network::{PeerId, NodeId}; @@ -32,7 +32,7 @@ use provider::Provider; use request::{self, Request, Headers}; use rlp::*; -use util::{Bytes, DBValue, H256, U256}; +use util::{Address, Bytes, DBValue, H256, U256}; use std::sync::Arc; @@ -492,6 +492,56 @@ fn get_contract_code() { proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body); } +#[test] +fn proof_of_execution() { + let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); + let capabilities = capabilities(); + + let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + + let cur_status = status(provider.client.chain_info()); + + { + let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + proto.on_connect(&1, 
&Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); + } + + let req_id = 112; + let mut request = Request::TransactionProof (request::TransactionProof { + at: H256::default(), + from: Address::default(), + action: Action::Call(Address::default()), + gas: 100.into(), + gas_price: 0.into(), + value: 0.into(), + data: Vec::new(), + }); + + // first: a valid amount to request execution of. + let request_body = encode_request(&request, req_id); + let response = { + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100); + + let mut response_stream = RlpStream::new_list(3); + response_stream.append(&req_id).append(&new_creds).begin_list(0); + + response_stream.out() + }; + + let expected = Expect::Respond(packet::TRANSACTION_PROOF, response); + proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + + // next: way too much requested gas. + if let Request::TransactionProof(ref mut req) = request { + req.gas = 100_000_000.into(); + } + let req_id = 113; + let request_body = encode_request(&request, req_id); + let expected = Expect::Punish(1); + proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); +} + #[test] fn id_guard() { use super::request_set::RequestSet; From 7c541117b32a71ed60a2fb6af6d2913065eca0ee Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 19:01:41 +0100 Subject: [PATCH 10/91] fix call bug --- ethcore/src/client/client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index ad61fd629..21936e1c5 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -880,7 +880,8 @@ impl snapshot::DatabaseRestore for Client { impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { - let env_info 
= self.env_info(block).ok_or(CallError::StatePruned)?; + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + env_info.gas_limit = U256::max_value(); // that's just a copy of the state. let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; @@ -963,13 +964,13 @@ impl BlockChainClient for Client { { while upper - lower > 1.into() { let mid = (lower + upper) / 2.into(); - trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); + trace!(target: "binary_chop", "{} .. {} .. {}", lower, mid, upper); let c = cond(mid)?; match c { true => upper = mid, false => lower = mid, }; - trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); + trace!(target: "binary_chop", "{} => {} .. {}", c, lower, upper); } Ok(upper) } From 69e82e15a35a906ec71b5d94e906c32dd930fa85 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 20:10:38 +0100 Subject: [PATCH 11/91] request transaction proofs from on_demand --- ethcore/light/src/net/mod.rs | 5 +- ethcore/light/src/on_demand/mod.rs | 81 +++++++++++++++++++++++++- ethcore/light/src/on_demand/request.rs | 35 ++++++++++- ethcore/src/client/client.rs | 1 - ethcore/src/env_info.rs | 4 +- ethcore/src/lib.rs | 3 +- ethcore/src/state/mod.rs | 62 ++++++++++++++++++-- 7 files changed, 178 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 6bb1cb227..58ab9662e 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -1185,7 +1185,10 @@ impl LightProtocol { // Receive a request for proof-of-execution. fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - const MAX_GAS: usize = 10_000_000; // refuse to execute more than this amount of gas at once. + // refuse to execute more than this amount of gas at once. + // this is appx. the point at which the proof of execution would no longer fit in + // a single Devp2p packet. 
+ const MAX_GAS: usize = 50_000_000; use util::Uint; let peers = self.peers.read(); diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index c34e2d922..1efff4005 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -23,12 +23,14 @@ use std::collections::HashMap; use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::receipt::Receipt; +use ethcore::state::ProvedExecution; +use ethcore::executed::{Executed, ExecutionError}; use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, RwLock, U256}; +use util::{Bytes, DBValue, RwLock, U256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; @@ -50,6 +52,7 @@ enum Pending { BlockReceipts(request::BlockReceipts, Sender>), Account(request::Account, Sender), Code(request::Code, Sender), + TxProof(request::TransactionProof, Sender>), } /// On demand request service. See module docs for more details. @@ -347,6 +350,50 @@ impl OnDemand { self.orphaned_requests.write().push(pending) } + /// Request proof-of-execution for a transaction. 
+ pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { + let (sender, receiver) = oneshot::channel(); + + self.dispatch_transaction_proof(ctx, req, sender); + + receiver + } + + fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender>) { + let num = req.header.number(); + let les_req = LesRequest::TransactionProof(les_request::TransactionProof { + at: req.header.hash(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }); + let pending = Pending::TxProof(req, sender); + + // we're looking for a peer with serveStateSince(num) + for (id, peer) in self.peers.read().iter() { + if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { + match ctx.request_from(*id, les_req.clone()) { + Ok(req_id) => { + trace!(target: "on_demand", "Assigning request to peer {}", id); + self.pending_requests.write().insert( + req_id, + pending + ); + return + } + Err(e) => + trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), + } + } + } + + trace!(target: "on_demand", "No suitable peer for request"); + self.orphaned_requests.write().push(pending) + } + // dispatch orphaned requests, and discard those for which the corresponding // receiver has been dropped. 
fn dispatch_orphaned(&self, ctx: &BasicContext) { @@ -390,6 +437,8 @@ impl OnDemand { if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) }, Pending::Code(req, mut sender) => if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) }, + Pending::TxProof(req, mut sender) => + if !check_hangup(&mut sender) { self.dispatch_transaction_proof(ctx, req, sender) } } } } @@ -596,6 +645,36 @@ impl Handler for OnDemand { } } + fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) { + let peer = ctx.peer(); + let req = match self.pending_requests.write().remove(&req_id) { + Some(req) => req, + None => return, + }; + + match req { + Pending::TxProof(req, sender) => { + match req.check_response(items) { + ProvedExecution::Complete(executed) => { + sender.complete(Ok(executed)); + return + } + ProvedExecution::Failed(err) => { + sender.complete(Err(err)); + return + } + ProvedExecution::BadProof => { + warn!("Error handling response for transaction proof request"); + ctx.disable_peer(peer); + } + } + + self.dispatch_transaction_proof(ctx.as_basic(), req, sender); + } + _ => panic!("Only transaction proof request dispatches transaction proof requests; qed"), + } + } + fn tick(&self, ctx: &BasicContext) { self.dispatch_orphaned(ctx) } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 3964137d9..3a72db51d 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -16,12 +16,18 @@ //! Request types, verification, and verification errors. 
+use std::sync::Arc; + use ethcore::basic_account::BasicAccount; use ethcore::encoded; +use ethcore::engines::Engine; +use ethcore::env_info::EnvInfo; use ethcore::receipt::Receipt; +use ethcore::state::{self, ProvedExecution}; +use ethcore::transaction::SignedTransaction; use rlp::{RlpStream, Stream, UntrustedRlp, View}; -use util::{Address, Bytes, HashDB, H256, U256}; +use util::{Address, Bytes, DBValue, HashDB, H256, U256}; use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; @@ -231,6 +237,33 @@ impl Code { } } +/// Request for transaction execution, along with the parts necessary to verify the proof. +pub struct TransactionProof { + /// The transaction to request proof of. + pub tx: SignedTransaction, + /// Block header. + pub header: encoded::Header, + /// Transaction environment info. + pub env_info: EnvInfo, + /// Consensus engine. + pub engine: Arc, +} + +impl TransactionProof { + /// Check the proof, returning the proved execution or indicate that the proof was bad. 
+ pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { + let root = self.header.state_root(); + + state::check_proof( + state_items, + root, + &self.tx, + &*self.engine, + &self.env_info, + ) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 21936e1c5..8692e831a 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1616,7 +1616,6 @@ impl ::client::ProvingBlockChainClient for Client { let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false }; let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options); - match res { Err(ExecutionError::Internal(_)) => return None, _ => return Some(state.drop().1.extract_proof()), diff --git a/ethcore/src/env_info.rs b/ethcore/src/env_info.rs index 9e1bb6a40..cc42008d5 100644 --- a/ethcore/src/env_info.rs +++ b/ethcore/src/env_info.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Environment information for transaction execution. + use std::cmp; use std::sync::Arc; use util::{U256, Address, H256, Hashable}; @@ -25,7 +27,7 @@ use ethjson; pub type LastHashes = Vec; /// Information concerning the execution environment for a message-call/contract-creation. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EnvInfo { /// The block number. pub number: BlockNumber, diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 15c5834cd..ea7de9e61 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -79,7 +79,6 @@ //! cargo build --release //! 
``` - extern crate ethcore_io as io; extern crate rustc_serialize; extern crate crypto; @@ -140,12 +139,12 @@ pub mod action_params; pub mod db; pub mod verification; pub mod state; +pub mod env_info; #[macro_use] pub mod evm; mod cache_manager; mod blooms; mod basic_types; -mod env_info; mod pod_account; mod state_db; mod account_db; diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 7aff83c14..3c5a3bc09 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; +use types::executed::{Executed, ExecutionError}; use types::state_diff::StateDiff; use transaction::SignedTransaction; use state_db::StateDB; @@ -60,6 +61,17 @@ pub struct ApplyOutcome { /// Result type for the execution ("application") of a transaction. pub type ApplyResult = Result; +/// Return type of proof validity check. +#[derive(Debug, Clone)] +pub enum ProvedExecution { + /// Proof wasn't enough to complete execution. + BadProof, + /// The transaction failed, but not due to a bad proof. + Failed(ExecutionError), + /// The transaction successfully completd with the given proof. + Complete(Executed), +} + #[derive(Eq, PartialEq, Clone, Copy, Debug)] /// Account modification state. Used to check if the account was /// Modified in between commits and overall. @@ -150,6 +162,39 @@ impl AccountEntry { } } +/// Check the given proof of execution. +/// `Err(ExecutionError::Internal)` indicates failure, everything else indicates +/// a successful proof (as the transaction itself may be poorly chosen). 
+pub fn check_proof( + proof: &[::util::DBValue], + root: H256, + transaction: &SignedTransaction, + engine: &Engine, + env_info: &EnvInfo, +) -> ProvedExecution { + let backend = self::backend::ProofCheck::new(proof); + let mut factories = Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; + + let res = State::from_existing( + backend, + root, + engine.account_start_nonce(), + factories + ); + + let mut state = match res { + Ok(state) => state, + Err(_) => return ProvedExecution::BadProof, + }; + + match state.execute(env_info, engine, transaction, false) { + Ok(executed) => ProvedExecution::Complete(executed), + Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, + Err(e) => ProvedExecution::Failed(e), + } +} + /// Representation of the entire state of all accounts in the system. /// /// `State` can work together with `StateDB` to share account cache. @@ -548,16 +593,12 @@ impl State { Ok(()) } - /// Execute a given transaction. + /// Execute a given transaction, producing a receipt and an optional trace. /// This will change the state accordingly. pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { // let old = self.to_pod(); - let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; - let vm_factory = self.factories.vm.clone(); - let e = Executive::new(self, env_info, engine, &vm_factory).transact(t, options)?; - - // TODO uncomment once to_pod() works correctly. + let e = self.execute(env_info, engine, t, tracing)?; // trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod())); let state_root = if env_info.number < engine.params().eip98_transition { self.commit()?; @@ -570,6 +611,15 @@ impl State { Ok(ApplyOutcome{receipt: receipt, trace: e.trace}) } + // Execute a given transaction. 
+ fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> Result { + let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; + let vm_factory = self.factories.vm.clone(); + + Executive::new(self, env_info, engine, &vm_factory).transact(t, options) + } + + /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. #[cfg_attr(feature="dev", allow(match_ref_pats))] From 645011427ab9b1b9a08a31ecf0852c72d370413c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2017 13:48:56 +0100 Subject: [PATCH 12/91] most of proved_execution rpc --- rpc/src/v1/impls/eth.rs | 14 +++++---- rpc/src/v1/impls/light/eth.rs | 59 ++++++++++++++++++++++++++++------- rpc/src/v1/traits/eth.rs | 8 ++--- 3 files changed, 59 insertions(+), 22 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index f47ab2055..cf8bdbbe1 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -627,26 +627,28 @@ impl Eth for EthClient where self.send_raw_transaction(raw) } - fn call(&self, request: CallRequest, num: Trailing) -> Result { + fn call(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); let signed = self.sign_call(request)?; let result = match num.0 { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weakf!(self.client).call(&signed, num.into(), Default::default()), }; - result + future::done(result .map(|b| b.output.into()) .map_err(errors::from_call_error) + ).boxed() } - fn estimate_gas(&self, request: CallRequest, num: Trailing) -> Result 
{ + fn estimate_gas(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); let signed = self.sign_call(request)?; - take_weak!(self.client).estimate_gas(&signed, num.0.into()) + future::done(take_weakf!(self.client).estimate_gas(&signed, num.0.into()) .map(Into::into) .map_err(errors::from_call_error) + ).boxed() } fn compile_lll(&self, _: String) -> Result { diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 2e129d31e..fdd5e193b 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -38,7 +38,7 @@ use rlp::{UntrustedRlp, View}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use util::{RwLock, U256}; -use futures::{future, Future, BoxFuture}; +use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; @@ -153,6 +153,27 @@ impl EthClient { .unwrap_or_else(|| future::err(err_no_context()).boxed()) }).boxed() } + + // helper for getting proved execution. 
+ fn proved_execution(&self, req: CallRequest, num: Trailing) -> Result, Error> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + let req: CRequest = req.into(); + let id = num.0.into(); + + let from = request.from.unwrap_or(Address::zero()); + let action = request.to.map_or(Action::Create, Action::Call); + let gas: request.gas.unwrap_or(U256::from(10_000_000)); + let value = request.value.unwrap_or_else(U256::zero); + let data = request.data.map_or_else(Vec::new, |d| d.to_vec()); + + sync.with_context(|ctx| { + let nonce_fut = req.nonce.map(Some).ok_or(err_no_context()) + .or_else(|_| self.account(from, id).map(|acc| acc.map(|a| a.nonce))); + + let gas_price_fut = req.gas_price.map(Some).ok_or(err_no_context()) + .or_else(|_| unimplemented!()) + }) + } } impl Eth for EthClient { @@ -328,12 +349,25 @@ impl Eth for EthClient { self.send_raw_transaction(raw) } - fn call(&self, req: CallRequest, num: Trailing) -> Result { - Err(errors::unimplemented(None)) + fn call(&self, req: CallRequest, num: Trailing) -> BoxFuture { + self.proved_execution().and_then(|res| { + match res { + Ok(Some(exec)) => Ok(exec.output.into()), + Ok(None) => Err(errors::unknown_block()), + Err(e) => Err(errors::execution(e)), + } + }).boxed() } - fn estimate_gas(&self, req: CallRequest, num: Trailing) -> Result { - Err(errors::unimplemented(None)) + fn estimate_gas(&self, req: CallRequest, num: Trailing) -> BoxFuture { + // TODO: binary chop for more accurate estimates. 
+ self.proved_execution().and_then(|res| { + match res { + Ok(Some(exec)) => Ok((exec.refunded + exec.gas_used).into()), + Ok(None) => Err(errors::unknown_block()), + Err(e) => Err(errors::execution(e)), + } + }).boxed() } fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { @@ -361,19 +395,20 @@ impl Eth for EthClient { } fn compilers(&self) -> Result, Error> { - Err(errors::unimplemented(None)) + Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) + } - fn compile_lll(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_lll(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string())) } - fn compile_solidity(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_serpent(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string())) } - fn compile_serpent(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_solidity(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) } fn logs(&self, _filter: Filter) -> Result, Error> { diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index eaf608c60..365ad9320 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -110,12 +110,12 @@ build_rpc_trait! { fn submit_transaction(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[rpc(name = "eth_call")] - fn call(&self, CallRequest, Trailing) -> Result; + #[rpc(async, name = "eth_call")] + fn call(&self, CallRequest, Trailing) -> BoxFuture; /// Estimate gas needed for execution of given contract. - #[rpc(name = "eth_estimateGas")] - fn estimate_gas(&self, CallRequest, Trailing) -> Result; + #[rpc(async, name = "eth_estimateGas")] + fn estimate_gas(&self, CallRequest, Trailing) -> BoxFuture; /// Get transaction by its hash. 
#[rpc(name = "eth_getTransactionByHash")] From af235e564ebadf5e2039b65baa2c080b455ddbe9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2017 15:05:33 +0100 Subject: [PATCH 13/91] proved execution future --- ethcore/light/src/client/mod.rs | 24 +++++--- rpc/src/v1/helpers/dispatch.rs | 99 +++++++++++++++++++------------ rpc/src/v1/impls/eth.rs | 12 +++- rpc/src/v1/impls/light/eth.rs | 102 +++++++++++++++++++++++++------- 4 files changed, 166 insertions(+), 71 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 4a4da2917..2872e0eec 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -230,22 +230,32 @@ impl Client { } /// Get a handle to the verification engine. - pub fn engine(&self) -> &Engine { - &*self.engine + pub fn engine(&self) -> &Arc { + &self.engine } - fn latest_env_info(&self) -> EnvInfo { - let header = self.best_block_header(); + /// Get the latest environment info. + pub fn latest_env_info(&self) -> EnvInfo { + self.env_info(BlockId::Latest) + .expect("Best block header and recent hashes always stored; qed") + } - EnvInfo { + /// Get environment info for a given block. 
+ pub fn env_info(&self, id: BlockId) -> Option { + let header = match self.block_header(id) { + Some(hdr) => hdr, + None => return None, + }; + + Some(EnvInfo { number: header.number(), author: header.author(), timestamp: header.timestamp(), difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(header.hash()), + last_hashes: self.build_last_hashes(header.parent_hash()), gas_used: Default::default(), gas_limit: header.gas_limit(), - } + }) } fn build_last_hashes(&self, mut parent_hash: H256) -> Arc> { diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 0bea7f9a1..b11ada048 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -158,6 +158,54 @@ impl Dispatcher for FullDispatcher, + client: Arc, + on_demand: Arc, + cache: Arc>, +) -> BoxFuture, Error> { + const GAS_PRICE_SAMPLE_SIZE: usize = 100; + + if let Some(cached) = cache.lock().gas_price_corpus() { + return future::ok(cached).boxed() + } + + let cache = cache.clone(); + let eventual_corpus = sync.with_context(|ctx| { + // get some recent headers with gas used, + // and request each of the blocks from the network. + let block_futures = client.ancestry_iter(BlockId::Latest) + .filter(|hdr| hdr.gas_used() != U256::default()) + .take(GAS_PRICE_SAMPLE_SIZE) + .map(request::Body::new) + .map(|req| on_demand.block(ctx, req)); + + // as the blocks come in, collect gas prices into a vector + stream::futures_unordered(block_futures) + .fold(Vec::new(), |mut v, block| { + for t in block.transaction_views().iter() { + v.push(t.gas_price()) + } + + future::ok(v) + }) + .map(move |v| { + // produce a corpus from the vector, cache it, and return + // the median as the intended gas price. 
+ let corpus: ::stats::Corpus<_> = v.into(); + cache.lock().set_gas_price_corpus(corpus.clone()); + corpus + }) + }); + + match eventual_corpus { + Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } +} + /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. /// Light client `ETH` RPC. #[derive(Clone)] @@ -197,44 +245,12 @@ impl LightDispatcher { /// Get a recent gas price corpus. // TODO: this could be `impl Trait`. pub fn gas_price_corpus(&self) -> BoxFuture, Error> { - const GAS_PRICE_SAMPLE_SIZE: usize = 100; - - if let Some(cached) = self.cache.lock().gas_price_corpus() { - return future::ok(cached).boxed() - } - - let cache = self.cache.clone(); - let eventual_corpus = self.sync.with_context(|ctx| { - // get some recent headers with gas used, - // and request each of the blocks from the network. - let block_futures = self.client.ancestry_iter(BlockId::Latest) - .filter(|hdr| hdr.gas_used() != U256::default()) - .take(GAS_PRICE_SAMPLE_SIZE) - .map(request::Body::new) - .map(|req| self.on_demand.block(ctx, req)); - - // as the blocks come in, collect gas prices into a vector - stream::futures_unordered(block_futures) - .fold(Vec::new(), |mut v, block| { - for t in block.transaction_views().iter() { - v.push(t.gas_price()) - } - - future::ok(v) - }) - .map(move |v| { - // produce a corpus from the vector, cache it, and return - // the median as the intended gas price. - let corpus: ::stats::Corpus<_> = v.into(); - cache.lock().set_gas_price_corpus(corpus.clone()); - corpus - }) - }); - - match eventual_corpus { - Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), - None => future::err(errors::network_disabled()).boxed(), - } + fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ) } /// Get an account's next nonce. 
@@ -285,7 +301,12 @@ impl Dispatcher for LightDispatcher { // fast path for known gas price. match request_gas_price { Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(), - None => self.gas_price_corpus().and_then(|corp| match corp.median() { + None => fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone() + ).and_then(|corp| match corp.median() { Some(median) => future::ok(*median), None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error. }).map(with_gas_price).boxed() diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index cf8bdbbe1..47143ac75 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -629,10 +629,13 @@ impl Eth for EthClient where fn call(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = self.sign_call(request)?; + let signed = match self.sign_call(request) { + Ok(signed) => signed, + Err(e) => return future::err(e).boxed(), + }; let result = match num.0 { - BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weakf!(self.client), &signed, Default::default()), num => take_weakf!(self.client).call(&signed, num.into(), Default::default()), }; @@ -644,7 +647,10 @@ impl Eth for EthClient where fn estimate_gas(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = self.sign_call(request)?; + let signed = match self.sign_call(request) { + Ok(signed) => signed, + Err(e) => return future::err(e).boxed(), + }; future::done(take_weakf!(self.client).estimate_gas(&signed, num.0.into()) .map(Into::into) .map_err(errors::from_call_error) diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index f6be478fa..f889faf00 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ 
b/rpc/src/v1/impls/light/eth.rs @@ -24,6 +24,7 @@ use std::sync::Arc; use jsonrpc_core::Error; use jsonrpc_macros::Trailing; +use light::cache::Cache as LightDataCache; use light::client::Client as LightClient; use light::{cht, TransactionQueue}; use light::on_demand::{request, OnDemand}; @@ -31,17 +32,18 @@ use light::on_demand::{request, OnDemand}; use ethcore::account_provider::{AccountProvider, DappId}; use ethcore::basic_account::BasicAccount; use ethcore::encoded; +use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; -use ethcore::transaction::SignedTransaction; +use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethsync::LightSync; use rlp::{UntrustedRlp, View}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use util::{RwLock, U256}; +use util::{RwLock, Mutex, FixedHash, Uint, U256}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; -use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; +use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; use v1::helpers::block_import::is_major_importing; use v1::traits::Eth; use v1::types::{ @@ -60,6 +62,7 @@ pub struct EthClient { on_demand: Arc, transaction_queue: Arc>, accounts: Arc, + cache: Arc>, } // helper for internal error: on demand sender cancelled. @@ -67,6 +70,8 @@ fn err_premature_cancel(_cancel: oneshot::Canceled) -> Error { errors::internal("on-demand sender prematurely cancelled", "") } +type ExecutionResult = Result; + impl EthClient { /// Create a new `EthClient` with a handle to the light sync instance, client, /// and on-demand request service, which is assumed to be attached as a handler. 
@@ -76,6 +81,7 @@ impl EthClient { on_demand: Arc, transaction_queue: Arc>, accounts: Arc, + cache: Arc>, ) -> Self { EthClient { sync: sync, @@ -83,6 +89,7 @@ impl EthClient { on_demand: on_demand, transaction_queue: transaction_queue, accounts: accounts, + cache: cache, } } @@ -149,24 +156,77 @@ impl EthClient { } // helper for getting proved execution. - fn proved_execution(&self, req: CallRequest, num: Trailing) -> Result, Error> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn proved_execution(&self, req: CallRequest, num: Trailing) -> BoxFuture { + const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); + + + let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone()); let req: CRequest = req.into(); let id = num.0.into(); - let from = request.from.unwrap_or(Address::zero()); - let action = request.to.map_or(Action::Create, Action::Call); - let gas: request.gas.unwrap_or(U256::from(10_000_000)); - let value = request.value.unwrap_or_else(U256::zero); - let data = request.data.map_or_else(Vec::new, |d| d.to_vec()); + let from = req.from.unwrap_or(Address::zero()); + let nonce_fut = match req.nonce { + Some(nonce) => future::ok(Some(nonce)).boxed(), + None => self.account(from, id).map(|acc| acc.map(|a| a.nonce)).boxed(), + }; - sync.with_context(|ctx| { - let nonce_fut = req.nonce.map(Some).ok_or(err_no_context()) - .or_else(|_| self.account(from, id).map(|acc| acc.map(|a| a.nonce))); + let gas_price_fut = match req.gas_price { + Some(price) => future::ok(price).boxed(), + None => dispatch::fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ).map(|corp| match corp.median() { + Some(median) => *median, + None => DEFAULT_GAS_PRICE, + }).boxed() + }; - let gas_price_fut = req.gas_price.map(Some).ok_or(err_no_context()) - .or_else(|_| unimplemented!()) - }) + // if nonce resolves, this should too since it'll be in the LRU-cache. 
+ let header_fut = self.header(id); + + // fetch missing transaction fields from the network. + nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { + let action = req.to.map_or(Action::Create, Action::Call); + let gas = req.gas.unwrap_or(U256::from(10_000_000)); // better gas amount? + let value = req.value.unwrap_or_else(U256::zero); + let data = req.data.map_or_else(Vec::new, |d| d.to_vec()); + + future::done(match nonce { + Some(n) => Ok(EthTransaction { + nonce: n, + action: action, + gas: gas, + gas_price: gas_price, + value: value, + data: data, + }.fake_sign(from)), + None => Err(errors::unknown_block()), + }) + }).join(header_fut).and_then(move |(tx, hdr)| { + // then request proved execution. + // TODO: get last-hashes from network. + let (env_info, hdr) = match (client.env_info(id), hdr) { + (Some(env_info), Some(hdr)) => (env_info, hdr), + _ => return future::err(errors::unknown_block()).boxed(), + }; + let request = request::TransactionProof { + tx: tx, + header: hdr, + env_info: env_info, + engine: client.engine().clone(), + }; + + let proved_future = sync.with_context(move |ctx| { + on_demand.transaction_proof(ctx, request).map_err(err_premature_cancel).boxed() + }); + + match proved_future { + Some(fut) => fut.boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).boxed() } } @@ -344,10 +404,9 @@ impl Eth for EthClient { } fn call(&self, req: CallRequest, num: Trailing) -> BoxFuture { - self.proved_execution().and_then(|res| { + self.proved_execution(req, num).and_then(|res| { match res { - Ok(Some(exec)) => Ok(exec.output.into()), - Ok(None) => Err(errors::unknown_block()), + Ok(exec) => Ok(exec.output.into()), Err(e) => Err(errors::execution(e)), } }).boxed() @@ -355,10 +414,9 @@ impl Eth for EthClient { fn estimate_gas(&self, req: CallRequest, num: Trailing) -> BoxFuture { // TODO: binary chop for more accurate estimates. 
- self.proved_execution().and_then(|res| { + self.proved_execution(req, num).and_then(|res| { match res { - Ok(Some(exec)) => Ok((exec.refunded + exec.gas_used).into()), - Ok(None) => Err(errors::unknown_block()), + Ok(exec) => Ok((exec.refunded + exec.gas_used).into()), Err(e) => Err(errors::execution(e)), } }).boxed() From bbb50caa893de2ee126af16f1f4e36b3c9f0bdf7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 3 Mar 2017 19:25:29 +0100 Subject: [PATCH 14/91] initial request definitions --- ethcore/light/src/lib.rs | 2 +- ethcore/light/src/types/les_request.rs | 228 -------- ethcore/light/src/types/mod.rs.in | 2 +- ethcore/light/src/types/request.rs | 707 +++++++++++++++++++++++++ 4 files changed, 709 insertions(+), 230 deletions(-) delete mode 100644 ethcore/light/src/types/les_request.rs create mode 100644 ethcore/light/src/types/request.rs diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index b6e06a02b..ebf5f4f08 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -57,7 +57,7 @@ mod types; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; -pub use types::les_request as request; +pub use types::request as request; #[macro_use] extern crate log; diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs deleted file mode 100644 index dbff19eb5..000000000 --- a/ethcore/light/src/types/les_request.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! LES request types. - -use ethcore::transaction::Action; -use util::{Address, H256, U256, Uint}; - -/// Either a hash or a number. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum HashOrNumber { - /// Block hash variant. - Hash(H256), - /// Block number variant. - Number(u64), -} - -impl From for HashOrNumber { - fn from(hash: H256) -> Self { - HashOrNumber::Hash(hash) - } -} - -impl From for HashOrNumber { - fn from(num: u64) -> Self { - HashOrNumber::Number(num) - } -} - -/// A request for block headers. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Headers { - /// Starting block number or hash. - pub start: HashOrNumber, - /// The maximum amount of headers which can be returned. - pub max: usize, - /// The amount of headers to skip between each response entry. - pub skip: u64, - /// Whether the headers should proceed in falling number from the initial block. - pub reverse: bool, -} - -/// A request for specific block bodies. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Bodies { - /// Hashes which bodies are being requested for. - pub block_hashes: Vec -} - -/// A request for transaction receipts. -/// -/// This request is answered with a list of transaction receipts for each block -/// requested. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Receipts { - /// Block hashes to return receipts for. - pub block_hashes: Vec, -} - -/// A request for a state proof -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProof { - /// Block hash to query state from. - pub block: H256, - /// Key of the state trie -- corresponds to account hash. 
- pub key1: H256, - /// Key in that account's storage trie; if empty, then the account RLP should be - /// returned. - pub key2: Option, - /// if greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, // could even safely be u8; trie w/ 32-byte key can be at most 64-levels deep. -} - -/// A request for state proofs. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCode { - /// Block hash - pub block_hash: H256, - /// Account key (== sha3(address)) - pub account_key: H256, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCodes { - /// Block hash and account key (== sha3(address)) pairs to fetch code for. - pub code_requests: Vec, -} - -/// A request for a header proof from the Canonical Hash Trie. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProof { - /// Number of the CHT. - pub cht_number: u64, - /// Block number requested. May not be 0: genesis isn't included in any CHT. - pub block_number: u64, - /// If greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, -} - -/// A request for header proofs from the CHT. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for proof of (simulated) transaction execution. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct TransactionProof { - /// Block hash to request for. - pub at: H256, - /// Address to treat as the caller. - pub from: Address, - /// Action to take: either a call or a create. 
- pub action: Action, - /// Amount of gas to request proof-of-execution for. - pub gas: U256, - /// Price for each gas. - pub gas_price: U256, - /// Value to simulate sending. - pub value: U256, - /// Transaction data. - pub data: Vec, -} - -/// Kinds of requests. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Kind { - /// Requesting headers. - Headers, - /// Requesting block bodies. - Bodies, - /// Requesting transaction receipts. - Receipts, - /// Requesting proofs of state trie nodes. - StateProofs, - /// Requesting contract code by hash. - Codes, - /// Requesting header proofs (from the CHT). - HeaderProofs, - /// Requesting proof of transaction execution. - TransactionProof, -} - -/// Encompasses all possible types of requests in a single structure. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Request { - /// Requesting headers. - Headers(Headers), - /// Requesting block bodies. - Bodies(Bodies), - /// Requesting transaction receipts. - Receipts(Receipts), - /// Requesting state proofs. - StateProofs(StateProofs), - /// Requesting contract codes. - Codes(ContractCodes), - /// Requesting header proofs. - HeaderProofs(HeaderProofs), - /// Requesting proof of transaction execution. - TransactionProof(TransactionProof), -} - -impl Request { - /// Get the kind of request this is. - pub fn kind(&self) -> Kind { - match *self { - Request::Headers(_) => Kind::Headers, - Request::Bodies(_) => Kind::Bodies, - Request::Receipts(_) => Kind::Receipts, - Request::StateProofs(_) => Kind::StateProofs, - Request::Codes(_) => Kind::Codes, - Request::HeaderProofs(_) => Kind::HeaderProofs, - Request::TransactionProof(_) => Kind::TransactionProof, - } - } - - /// Get the amount of requests being made. - /// In the case of `TransactionProof`, this is the amount of gas being requested. 
- pub fn amount(&self) -> usize { - match *self { - Request::Headers(ref req) => req.max, - Request::Bodies(ref req) => req.block_hashes.len(), - Request::Receipts(ref req) => req.block_hashes.len(), - Request::StateProofs(ref req) => req.requests.len(), - Request::Codes(ref req) => req.code_requests.len(), - Request::HeaderProofs(ref req) => req.requests.len(), - Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() { - true => usize::max_value(), - false => req.gas.low_u64() as usize, - } - } - } -} diff --git a/ethcore/light/src/types/mod.rs.in b/ethcore/light/src/types/mod.rs.in index 0adfbf0e4..eba551b53 100644 --- a/ethcore/light/src/types/mod.rs.in +++ b/ethcore/light/src/types/mod.rs.in @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -pub mod les_request; \ No newline at end of file +pub mod request; diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs new file mode 100644 index 000000000..279296cf8 --- /dev/null +++ b/ethcore/light/src/types/request.rs @@ -0,0 +1,707 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light protocol request types. 
+ +use std::collections::HashMap; + +use ethcore::transaction::Action; +use util::{Address, H256, U256, Uint}; + +// re-exports of request types. +pub use self::header::{ + Complete as CompleteHeadersRequest, + Incomplete as IncompleteHeadersRequest, + Response as HeadersResponse +}; +pub use self::header_proof::{ + Complete as CompleteHeaderProofRequest, + Incomplete as IncompleteHeaderProofRequest, + Response as HeaderProofResponse +}; +pub use self::block_body::{ + Complete as CompleteBodyRequest, + Incomplete as IncompleteBodyRequest, + Response as BodyResponse +}; +pub use self::block_receipts::{ + Complete as CompleteReceiptsRequest, + Incomplete as IncompleteReceiptsRequest, + Response as ReceiptsResponse +}; +pub use self::account::{ + Complete as CompleteAccountRequest, + Incomplete as IncompleteAccountRequest, + Response as AccountResponse, +}; +pub use self::storage::{ + Complete as CompleteStorageRequest, + Incomplete as IncompleteStorageRequest, + Response as StorageResponse +}; +pub use self::contract_code::{ + Complete as CompleteCodeRequest, + Incomplete as IncompleteCodeRequest, + Response as CodeResponse, +}; + +/// Error indicating a reference to a non-existent or wrongly-typed output. +pub struct NoSuchOutput; + +/// An input to a request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Field { + /// A pre-specified input. + Scalar(T), + /// An input which can be resolved later on. + /// (Request index, output index) + BackReference(usize, usize), +} + +impl From for Field { + fn from(val: T) -> Self { + Field::Scalar(val) + } +} + +/// Request outputs which can be reused as inputs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Output { + /// A 32-byte hash output. + Hash(H256), + /// An unsigned-integer output. + Number(u64), +} + +/// Response output kinds which can be used as back-references. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OutputKind { + /// A 32-byte hash output. + Hash, + /// An unsigned-integer output.
+ Number, +} + +/// Either a hash or a number. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", binary)] +pub enum HashOrNumber { + /// Block hash variant. + Hash(H256), + /// Block number variant. + Number(u64), +} + +impl From for HashOrNumber { + fn from(hash: H256) -> Self { + HashOrNumber::Hash(hash) + } +} + +impl From for HashOrNumber { + fn from(num: u64) -> Self { + HashOrNumber::Number(num) + } +} + +/// A potentially incomplete request. +pub trait IncompleteRequest: Sized { + type Complete; + + /// Check prior outputs against the needed inputs. + /// + /// This is called to ensure consistency of this request with + /// others in the same packet. + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>; + + /// Note that this request will produce the following outputs. + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); + + /// Fill the request. + /// + /// This function is provided an "output oracle" which allows fetching of + /// prior request outputs. + /// Only outputs previously checked with `check_outputs` will be available. + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result; +} + +/// Header request. +pub mod header { + use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::U256; + + /// Potentially incomplete headers request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Start block. + pub start: Field, + /// Skip between. + pub skip: U256, + /// Maximum to return. + pub max: U256, + /// Whether to reverse from start. 
+ pub reverse: bool, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.start { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => + f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let start = match self.start { + Field::Scalar(start) => start, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash.into(), + Output::Number(num) => num.into(), + } + }; + + Ok(Complete { + start: start, + skip: self.skip, + max: self.max, + reverse: self.reverse, + }) + } + + } + + /// A complete header request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Start block. + pub start: HashOrNumber, + /// Skip between. + pub skip: U256, + /// Maximum to return. + pub max: U256, + /// Whether to reverse from start. + pub reverse: bool, + } + + /// The output of a request for headers. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + header: Vec, + } + + impl Response { + /// Fill reusable outputs by writing them into the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } + } +} + +/// Request and response for header proofs. +pub mod header_proof { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block number.
+ pub num: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Number), + } + } + + fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { + note(1, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let num = match self.num { + Field::Scalar(num) => num, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Number(num) => num, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + num: num, + }) + } + + } + + /// A complete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get a header proof for. + pub num: u64, + } + + /// The output of a request for a header proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion proof of the header and total difficulty in the CHT. + pub proof: Vec, + /// The proved header's hash. + pub hash: H256, + /// The proved header's total difficulty. + pub td: U256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(1, Output::Hash(self.hash)); + } + } +} + +/// Request and response for block receipts +pub mod block_receipts { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get receipts for. 
+ pub hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let hash = match self.hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + hash: hash, + }) + } + + } + + /// A complete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The hash to get block receipts for. + pub hash: H256, + } + + /// The output of a request for block receipts. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block receipts. + pub receipts: Vec + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} + +/// Request and response for a block body +pub mod block_body { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get the body for.
+ pub hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.hash { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let hash = match self.hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + hash: hash, + }) + } + + } + + /// A complete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The hash to get a block body for. + pub hash: H256, + } + + /// The output of a request for block body. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block body. + pub body: encoded::Body, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} + +/// A request for an account proof. +pub mod account { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete request for an account proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address.
+ pub address_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + f(1, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let address_hash = match self.address_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + address_hash: address_hash, + }) + } + + } + + /// A complete request for an account. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Account nonce. + pub nonce: U256, + /// Account balance. + pub balance: U256, + /// Account's code hash. + pub code_hash: H256, + /// Account's storage trie root. + pub storage_root: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
+ pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.code_hash)); + f(1, Output::Hash(self.storage_root)); + } + } +} + +/// A request for a storage proof. +pub mod storage { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete request for an storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + /// Hash of the storage key. + pub key_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.key_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let address_hash = match self.address_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let key_hash = match self.key_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? 
{ + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + address_hash: address_hash, + key_hash: key_hash + }) + } + + } + + /// A complete request for a storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + /// Storage key hash. + pub key_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Storage value. + pub value: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.value)); + } + } +} + +/// A request for contract code. +pub mod contract_code { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete _ request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The code hash. 
+ pub code_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + if let Field::BackReference(req, idx) = self.code_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let code_hash = match self.code_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + code_hash: code_hash, + }) + } + + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The code hash. + pub code_hash: H256, + } + + /// The output of a request for contract code. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The requested code. + pub code: Bytes, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
+ pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} From 41effadb94ed4f36624d6d1af53d22133e764e78 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 12:21:06 +0100 Subject: [PATCH 15/91] RLP encoding and decoding for requests --- ethcore/light/src/client/header_chain.rs | 1 - ethcore/light/src/lib.rs | 2 +- ethcore/light/src/types/request.rs | 265 +++++++++++++++++++++++ 3 files changed, 266 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 575938cd5..9dcd25888 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -24,7 +24,6 @@ //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. // TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers` -// use std::collections::{BTreeMap, HashMap}; diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ebf5f4f08..ada58d8de 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -26,7 +26,7 @@ //! use-cases like sending transactions from a personal account. //! //! The light client performs a header-only sync, doing verification and pruning -//! historical blocks. Upon pruning, batches of 2048 blocks have a number => hash +//! historical blocks. Upon pruning, batches of 2048 blocks have a number => (hash, TD) //! mapping sealed into "canonical hash tries" which can later be used to verify //! historical block queries from peers. 
diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 279296cf8..259f3def7 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use ethcore::transaction::Action; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Address, H256, U256, Uint}; // re-exports of request types. @@ -77,6 +78,32 @@ impl From for Field { } } +impl Decodable for Field { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? { + 0 => Ok(Field::Scalar(rlp.val_at::(1)?)), + 1 => Ok({ + let inner_rlp = rlp.at(1)?; + Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) + }) + _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), + } + } +} + +impl Encodable for Field { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + match *self { + Field::Scalar(ref data) => s.append(&0u8).append(data), + Field::BackReference(ref req, ref idx) => + s.append(&1u8).begin_list(2).append(req).append(idx), + }; + } +} + /// Request outputs which can be reused as inputs. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Output { @@ -117,6 +144,114 @@ impl From for HashOrNumber { } } +/// All request types, as they're sent over the network. +pub enum Request { + /// A request for block headers. + Headers(IncompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(IncompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(IncompleteReceiptsRequest), + /// A request for a block body. + Body(IncompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(IncompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(IncompleteStorageRequest), + /// A request for contract code. + Code(IncompleteCodeRequest), + // Transaction proof. 
+} + +impl Request { + fn kind(&self) -> RequestKind { + match *self { + Request::Headers(_) => RequestKind::Headers, + Request::HeaderProof(_) => RequestKind::HeaderProof, + Request::Receipts(_) => RequestKind::Receipts, + Request::Body(_) => RequestKind::Body, + Request::Account(_) => RequestKind::Account, + Request::Storage(_) => RequestKind::Storage, + Request::Code(_) => RequestKind::Code, + } + } +} + +impl Decodable for Request { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? { + RequestKind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + RequestKind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + RequestKind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + RequestKind::Body => Ok(Request::Body(rlp.val_at(1)?)), + RequestKind::Account => Ok(Request::Account(rlp.val_at(1)?)), + RequestKind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + RequestKind::Code => Ok(Request::Code(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Request { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).append(&self.kind()); + + match *self { + Request::Headers(ref req) => s.append(req), + Request::HeaderProof(ref req) => s.append(req), + Request::Receipts(ref req) => s.append(req), + Request::Body(ref req) => s.append(req), + Request::Account(ref req) => s.append(req), + Request::Storage(ref req) => s.append(req), + Request::Code(ref req) => s.append(req), + }; + } +} + + +/// Kinds of requests. +/// Doubles as the "ID" field of the request. +#[repr(u8)] +pub enum RequestKind { + /// A request for headers. + Headers = 0, + HeaderProof = 1, + // TransactionIndex = 2, + Receipts = 3, + Body = 4, + Account = 5, + Storage = 6, + Code = 7, + // TransactionProof = 8, +} + +impl Decodable for RequestKind { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.as_val::()? 
{ + 0 => Ok(RequestKind::Headers), + 1 => Ok(RequestKind::HeaderProof), + // 2 => Ok(RequestKind::TransactionIndex, + 3 => Ok(RequestKind::Receipts), + 4 => Ok(RequestKind::Body), + 5 => Ok(RequestKind::Account), + 6 => Ok(RequestKind::Storage), + 7 => Ok(RequestKind::Code), + // 8 => Ok(RequestKind::TransactionProof), + _ => Err(DecoderError::Custom("Unknown PIP request ID.")), + } + } +} + +impl Encodable for RequestKind { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(self as &u8); + } +} + /// A potentially incomplete request. pub trait IncompleteRequest: Sized { type Complete; @@ -144,6 +279,7 @@ pub trait IncompleteRequest: Sized { pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::U256; /// Potentially incomplete headers request. @@ -159,6 +295,28 @@ pub mod header { pub reverse: bool, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + start: rlp.val_at(0)?, + skip: rlp.val_at(1)?, + max: rlp.val_at(2)?, + reverse: rlp.val_at(3)? + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4) + .append(&self.start) + .append(&self.skip) + .append(&self.max) + .append(&self.reverse); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -223,6 +381,7 @@ pub mod header { /// Request and response for header proofs. pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete header proof request. 
@@ -232,6 +391,21 @@ pub mod header_proof { pub num: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + num: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.num); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -295,6 +469,7 @@ pub mod header_proof { /// Request and response for block receipts pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete block receipts request. @@ -304,6 +479,21 @@ pub mod block_receipts { pub hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -360,6 +550,7 @@ pub mod block_receipts { pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete block body request. 
@@ -369,6 +560,21 @@ pub mod block_body { pub hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -425,6 +631,7 @@ pub mod block_body { pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an account proof. @@ -436,6 +643,24 @@ pub mod account { pub address_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.address_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -522,6 +747,7 @@ pub mod account { pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an storage proof. 
@@ -535,6 +761,26 @@ pub mod storage { pub key_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + key_hash: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.block_hash) + .append(&self.address_hash) + .append(&self.key_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -628,6 +874,7 @@ pub mod storage { pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete _ request. @@ -639,6 +886,24 @@ pub mod contract_code { pub code_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + code_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.code_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; From 8e9faa416da13b70e9dbc751b6285925bce77382 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 17:03:58 +0100 Subject: [PATCH 16/91] proofs of non-existance in ProvingBlockChainClient --- ethcore/src/client/client.rs | 17 +++--------- ethcore/src/client/test_client.rs | 13 ++++----- ethcore/src/client/traits.rs | 12 +++------ ethcore/src/state/account.rs | 13 ++++----- ethcore/src/state/mod.rs | 45 +++++++++++++++---------------- 5 files changed, 40 insertions(+), 60 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 63be1da07..54e433a72 100644 --- 
a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1585,23 +1585,14 @@ impl MayPanic for Client { } impl ::client::ProvingBlockChainClient for Client { - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)> { self.state_at(id) - .and_then(move |state| state.prove_storage(key1, key2, from_level).ok()) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_storage(key1, key2).ok()) } - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, ::types::basic_account::BasicAccount)> { self.state_at(id) - .and_then(move |state| state.prove_account(key1, from_level).ok()) - .unwrap_or_else(Vec::new) - } - - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes { - self.state_at(id) - .and_then(move |state| state.code_by_address_hash(account_key).ok()) - .and_then(|x| x) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_account(key1).ok()) } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 5d436f4c5..79783175d 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -38,6 +38,7 @@ use error::{ImportResult, Error as EthcoreError}; use evm::{Factory as EvmFactory, VMType, Schedule}; use miner::{Miner, MinerService, TransactionImportResult}; use spec::Spec; +use types::basic_account::BasicAccount; use types::mode::Mode; use types::pruning_info::PruningInfo; @@ -754,16 +755,12 @@ impl BlockChainClient for TestBlockChainClient { } impl ProvingBlockChainClient for TestBlockChainClient { - fn prove_storage(&self, _: H256, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() + fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { + None } - fn 
prove_account(&self, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() - } - - fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { - Vec::new() + fn prove_account(&self, _: H256, _: BlockId) -> Option<(Vec, BasicAccount)> { + None } fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option> { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 4af20de0f..145398ef6 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -34,6 +34,7 @@ use env_info::LastHashes; use block_import_error::BlockImportError; use ipc::IpcConfig; use types::ids::*; +use types::basic_account::BasicAccount; use types::trace_filter::Filter as TraceFilter; use types::call_analytics::CallAnalytics; use types::blockchain_info::BlockChainInfo; @@ -309,19 +310,12 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// /// Both provided keys assume a secure trie. /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec; + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)>; /// Prove account existence at a specific block id. /// The key is the keccak hash of the account's address. /// Returns a vector of raw trie nodes (in order from the root) proving the query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec; - - /// Get code by address hash. - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, BasicAccount)>; /// Prove execution of a transaction at the given block. 
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option>; diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index ebdf36d89..51f7e3b98 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -438,18 +438,19 @@ impl Account { /// trie. /// `storage_key` is the hash of the desired storage key, meaning /// this will only work correctly under a secure trie. - /// Returns a merkle proof of the storage trie node with all nodes before `from_level` - /// omitted. - pub fn prove_storage(&self, db: &HashDB, storage_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> Result<(Vec, H256), Box> { use util::trie::{Trie, TrieDB}; use util::trie::recorder::Recorder; - let mut recorder = Recorder::with_depth(from_level); + let mut recorder = Recorder::new(); let trie = TrieDB::new(db, &self.storage_root)?; - let _ = trie.get_with(&storage_key, &mut recorder)?; + let item: U256 = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&storage_key, query)?.unwrap_or_else(U256::zero) + }; - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), item.into())) } } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 3c5a3bc09..745fb2980 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; +use types::basic_account::BasicAccount; use types::executed::{Executed, ExecutionError}; use types::state_diff::StateDiff; use transaction::SignedTransaction; @@ -857,47 +858,43 @@ impl State { // State proof implementations; useful for light client protocols. impl State { /// Prove an account's existence or nonexistence in the state trie. 
- /// Returns a merkle proof of the account's trie node with all nodes before `from_level` - /// omitted or an encountered trie error. + /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. + /// If the account doesn't exist in the trie, prove that and return defaults. /// Requires a secure trie to be used for accurate results. /// `account_key` == sha3(address) - pub fn prove_account(&self, account_key: H256, from_level: u32) -> trie::Result> { - let mut recorder = Recorder::with_depth(from_level); + pub fn prove_account(&self, account_key: H256) -> trie::Result<(Vec, BasicAccount)> { + let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - trie.get_with(&account_key, &mut recorder)?; + let maybe_account: Option = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&account_key, query)? + }; + let account = maybe_account.unwrap_or_else(|| BasicAccount { + balance: 0.into(), + nonce: self.account_start_nonce, + code_hash: SHA3_EMPTY, + storage_root: ::util::sha3::SHA3_NULL_RLP, + }); - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), account)) } /// Prove an account's storage key's existence or nonexistence in the state. - /// Returns a merkle proof of the account's storage trie with all nodes before - /// `from_level` omitted. Requires a secure trie to be used for correctness. + /// Returns a merkle proof of the account's storage trie. + /// Requires a secure trie to be used for correctness. /// `account_key` == sha3(address) /// `storage_key` == sha3(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> trie::Result> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> trie::Result<(Vec, H256)> { // TODO: probably could look into cache somehow but it's keyed by // address, not sha3(address). 
let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let acc = match trie.get_with(&account_key, Account::from_rlp)? { Some(acc) => acc, - None => return Ok(Vec::new()), + None => return Ok((Vec::new(), H256::new())), }; let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - acc.prove_storage(account_db.as_hashdb(), storage_key, from_level) - } - - /// Get code by address hash. - /// Only works when backed by a secure trie. - pub fn code_by_address_hash(&self, account_key: H256) -> trie::Result> { - let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - let mut acc = match trie.get_with(&account_key, Account::from_rlp)? { - Some(acc) => acc, - None => return Ok(None), - }; - - let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - Ok(acc.cache_code(account_db.as_hashdb()).map(|c| (&*c).clone())) + acc.prove_storage(account_db.as_hashdb(), storage_key) } } From 87f3d53607266e6f055c97c0d9882d03e72e08d3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 17:36:56 +0100 Subject: [PATCH 17/91] new requests in provider. --- ethcore/light/src/client/mod.rs | 47 ------- ethcore/light/src/provider.rs | 196 ++++++++++++----------------- ethcore/light/src/types/request.rs | 115 ++++++++++++++++- 3 files changed, 191 insertions(+), 167 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 2872e0eec..34f7ed990 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -315,50 +315,3 @@ impl LightChainClient for Client { Client::cht_root(self, i) } } - -// dummy implementation, should be removed when a `TestClient` is added. 
-impl ::provider::Provider for Client { - fn chain_info(&self) -> BlockChainInfo { - Client::chain_info(self) - } - - fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { - None - } - - fn earliest_state(&self) -> Option { - None - } - - fn block_header(&self, id: BlockId) -> Option { - Client::block_header(self, id) - } - - fn block_body(&self, _id: BlockId) -> Option { - None - } - - fn block_receipts(&self, _hash: &H256) -> Option { - None - } - - fn state_proof(&self, _req: ::request::StateProof) -> Vec { - Vec::new() - } - - fn contract_code(&self, _req: ::request::ContractCode) -> Bytes { - Vec::new() - } - - fn header_proof(&self, _req: ::request::HeaderProof) -> Option<(encoded::Header, Vec)> { - None - } - - fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option> { - None - } - - fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> { - Vec::new() - } -} diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 3f55a6b99..4e43296ab 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -59,10 +59,10 @@ pub trait Provider: Send + Sync { /// /// The returned vector may have any length in the range [0, `max`], but the /// results within must adhere to the `skip` and `reverse` parameters. 
- fn block_headers(&self, req: request::Headers) -> Vec { + fn block_headers(&self, req: request::CompleteHeadersRequest) -> Option { use request::HashOrNumber; - if req.max == 0 { return Vec::new() } + if req.max == 0 { return None } let best_num = self.chain_info().best_block_number; let start_num = match req.start { @@ -70,7 +70,7 @@ pub trait Provider: Send + Sync { HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { None => { trace!(target: "les_provider", "Unknown block hash {} requested", hash); - return Vec::new(); + return None; } Some(header) => { let num = header.number(); @@ -79,7 +79,9 @@ pub trait Provider: Send + Sync { if req.max == 1 || canon_hash != Some(hash) { // Non-canonical header or single header requested. - return vec![header]; + return Some(::request::HeadersResponse { + headers: vec![header], + }) } num @@ -87,109 +89,39 @@ pub trait Provider: Send + Sync { } }; - (0u64..req.max as u64) + let headers = (0u64..req.max as u64) .map(|x: u64| x.saturating_mul(req.skip + 1)) .take_while(|x| if req.reverse { x < &start_num } else { best_num.saturating_sub(start_num) >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) .map(|x| self.block_header(BlockId::Number(x))) .take_while(|x| x.is_some()) .flat_map(|x| x) - .collect() + .collect(); + + Some(::request::HeadersResponse { headers: headers }) } /// Get a block header by id. fn block_header(&self, id: BlockId) -> Option; - /// Provide as many as possible of the requested blocks (minus the headers) encoded - /// in RLP format. - fn block_bodies(&self, req: request::Bodies) -> Vec> { - req.block_hashes.into_iter() - .map(|hash| self.block_body(BlockId::Hash(hash))) - .collect() - } + /// Fulfill a block body request. + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; - /// Get a block body by id. - fn block_body(&self, id: BlockId) -> Option; + /// Fulfill a request for block receipts. 
+ fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; - /// Provide the receipts as many as possible of the requested blocks. - /// Returns a vector of RLP-encoded lists of receipts. - fn receipts(&self, req: request::Receipts) -> Vec { - req.block_hashes.into_iter() - .map(|hash| self.block_receipts(&hash)) - .map(|receipts| receipts.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) - .collect() - } + /// Get an account proof. + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option; - /// Get a block's receipts as an RLP-encoded list by block hash. - fn block_receipts(&self, hash: &H256) -> Option; + /// Get a storage proof. + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option; - /// Provide a set of merkle proofs, as requested. Each request is a - /// block hash and request parameters. - /// - /// Returns a vector of RLP-encoded lists satisfying the requests. - fn proofs(&self, req: request::StateProofs) -> Vec { - use rlp::{RlpStream, Stream}; - - let mut results = Vec::with_capacity(req.requests.len()); - - for request in req.requests { - let proof = self.state_proof(request); - - let mut stream = RlpStream::new_list(proof.len()); - for node in proof { - stream.append_raw(&node, 1); - } - - results.push(stream.out()); - } - - results - } - - /// Get a state proof from a request. Each proof should be a vector - /// of rlp-encoded trie nodes, in ascending order by distance from the root. - fn state_proof(&self, req: request::StateProof) -> Vec; - - /// Provide contract code for the specified (block_hash, account_hash) pairs. - /// Each item in the resulting vector is either the raw bytecode or empty. - fn contract_codes(&self, req: request::ContractCodes) -> Vec { - req.code_requests.into_iter() - .map(|req| self.contract_code(req)) - .collect() - } - - /// Get contract code by request. Either the raw bytecode or empty. 
- fn contract_code(&self, req: request::ContractCode) -> Bytes; - - /// Provide header proofs from the Canonical Hash Tries as well as the headers - /// they correspond to -- each element in the returned vector is a 2-tuple. - /// The first element is a block header and the second a merkle proof of - /// the header in a requested CHT. - fn header_proofs(&self, req: request::HeaderProofs) -> Vec { - use rlp::{self, RlpStream, Stream}; - - req.requests.into_iter() - .map(|req| self.header_proof(req)) - .map(|maybe_proof| match maybe_proof { - None => rlp::EMPTY_LIST_RLP.to_vec(), - Some((header, proof)) => { - let mut stream = RlpStream::new_list(2); - stream.append_raw(&header.into_inner(), 1).begin_list(proof.len()); - - for node in proof { - stream.append_raw(&node, 1); - } - - stream.out() - } - }) - .collect() - } + /// Provide contract code for the specified (block_hash, code_hash) pair. + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option; /// Provide a header proof from a given Canonical Hash Trie as well as the - /// corresponding header. The first element is the block header and the - /// second is a merkle proof of the CHT. - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)>; + /// corresponding header. + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; /// Provide pending transactions. 
fn ready_transactions(&self) -> Vec; @@ -217,32 +149,52 @@ impl Provider for T { BlockChainClient::block_header(self, id) } - fn block_body(&self, id: BlockId) -> Option { + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; BlockChainClient::block_body(self, id) + .map(|body| ::request::BodyResponse { body: body }) } - fn block_receipts(&self, hash: &H256) -> Option { + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; BlockChainClient::block_receipts(self, hash) + .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(key2) => self.prove_storage(req.key1, key2, req.from_level, BlockId::Hash(req.block)), - None => self.prove_account(req.key1, req.from_level, BlockId::Hash(req.block)), - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + self.prove_account(req.address_hash, BlockId::Hash(req.block_hash)).map(|(proof, acc)| { + ::request::AccountResponse { + proof: proof, + nonce: acc.nonce, + balance: acc.balance, + code_hash: acc.code_hash, + storage_root: acc.storage_root, + } + })) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - self.code_by_hash(req.account_key, BlockId::Hash(req.block_hash)) + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + self.prove_account(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { + ::request::StorageResponse { + proof: proof, + value: item, + } + })) } - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { - if Some(req.cht_number) != cht::block_to_cht_number(req.block_number) { - debug!(target: "les_provider", "Requested CHT number mismatch with block number."); - return None; - } + fn contract_code(&self, req: request::ContractCode) -> Option { + self.state_data(&req.code_hash) + .map(|code| ::request::CodeResponse { code: code }) + } - let mut 
needed_hdr = None; + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; + let cht_number = match cht::block_to_cht_number(req.num) { + Some(cht_num) => cht_num, + None => { + debug!(target: "les_provider", "Requested CHT proof with invalid block number"); + return None; + } + }; + + let mut needed = None; // build the CHT, caching the requested header as we pass through it. let cht = { @@ -258,8 +210,8 @@ impl Provider for T { total_difficulty: td, }; - if hdr.number() == req.block_number { - needed_hdr = Some(hdr); + if hdr.number() == req.num { + needed = Some((hdr, td)); } Some(info) @@ -268,17 +220,21 @@ impl Provider for T { } }; - match cht::build(req.cht_number, block_info) { + match cht::build(cht_number, block_info) { Some(cht) => cht, None => return None, // incomplete CHT. } }; - let needed_hdr = needed_hdr.expect("`needed_hdr` always set in loop, number checked before; qed"); + let (needed_hdr, needed_td) = needed.expect("`needed` always set in loop, number checked before; qed"); // prove our result. 
- match cht.prove(req.block_number, req.from_level) { - Ok(Some(proof)) => Some((needed_hdr, proof)), + match cht.prove(req.num, 0) { + Ok(Some(proof)) => Some(::request::HeaderProofResponse { + proof: proof, + hash: needed_hdr.hash(), + td: needed_td, + }), Ok(None) => None, Err(e) => { debug!(target: "les_provider", "Error looking up number in freshly-created CHT: {}", e); @@ -347,23 +303,27 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } - fn block_body(&self, _id: BlockId) -> Option { + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { None } - fn block_receipts(&self, _hash: &H256) -> Option { + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { None } - fn state_proof(&self, _req: request::StateProof) -> Vec { - Vec::new() + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + None } - fn contract_code(&self, _req: request::ContractCode) -> Bytes { - Vec::new() + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + None } - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + None + } + + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { None } diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 259f3def7..2b23d0380 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -113,6 +113,15 @@ pub enum Output { Number(u64), } +impl Output { + fn kind(&self) -> OutputKind { + match *self { + Output::Hash(_) => OutputKind::Hash, + Output::Number(_) => OutputKind::Number, + } + } +} + /// Response output kinds which can be used as back-references. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OutputKind { @@ -145,6 +154,7 @@ impl From for HashOrNumber { } /// All request types, as they're sent over the network. 
+#[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { /// A request for block headers. Headers(IncompleteHeadersRequest), @@ -164,6 +174,27 @@ pub enum Request { // Transaction proof. } +/// All request types, as they're sent over the network. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CompleteRequest { + /// A request for block headers. + Headers(CompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(CompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(CompleteReceiptsRequest), + /// A request for a block body. + Body(CompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(CompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(CompleteStorageRequest), + /// A request for contract code. + Code(CompleteCodeRequest), + // Transaction proof. +} + impl Request { fn kind(&self) -> RequestKind { match *self { @@ -210,10 +241,54 @@ impl Encodable for Request { } } +impl IncompleteRequest for Request { + type Complete = CompleteRequest; + + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match *self { + Request::Headers(ref req) => req.check_outputs(f), + Request::HeaderProof(ref req) => req.check_outputs(f), + Request::Receipts(ref req) => req.check_outputs(f), + Request::Body(ref req) => req.check_outputs(f), + Request::Account(ref req) => req.check_outputs(f), + Request::Storage(ref req) => req.check_outputs(f), + Request::Code(ref req) => req.check_outputs(f), + } + } + + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { + match *self { + Request::Headers(ref req) => req.note_outputs(f), + Request::HeaderProof(ref req) => req.note_outputs(f), + Request::Receipts(ref req) => req.note_outputs(f), + Request::Body(ref req) => req.note_outputs(f), + Request::Account(ref req) => req.note_outputs(f), + Request::Storage(ref 
req) => req.note_outputs(f), + Request::Code(ref req) => req.note_outputs(f), + } + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + match self { + Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)), + Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)), + Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)), + Request::Body(req) => CompleteRequest::Body(req.fill(oracle)), + Request::Account(req) => CompleteRequest::Account(req.fill(oracle)), + Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)), + Request::Code(req) => CompleteRequest::Code(req.fill(oracle)), + } + } +} /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestKind { /// A request for headers. Headers = 0, @@ -252,6 +327,42 @@ impl Encodable for RequestKind { } } +/// All response types. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Response { + /// A response for block headers. + Headers(HeadersResponse), + /// A response for a header proof (from a CHT) + HeaderProof(HeaderProofResponse), + // TransactionIndex, + /// A response for a block's receipts. + Receipts(ReceiptsResponse), + /// A response for a block body. + Body(BodyResponse), + /// A response for a merkle proof of an account. + Account(AccountResponse), + /// A response for a merkle proof of contract storage. + Storage(StorageResponse), + /// A response for contract code. + Code(CodeResponse), + // Transaction proof. +} + +impl Response { + /// Fill reusable outputs by writing them into the function. 
+ pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + match *self { + Response::Headers(res) => res.fill_outputs(f) + Response::HeaderProof(res) => res.fill_outputs(f) + Response::Receipts(res) => res.fill_outputs(f) + Response::Body(res) => res.fill_outputs(f) + Response::Account(res) => res.fill_outputs(f) + Response::Storage(res) => res.fill_outputs(f) + Response::Code(res) => res.fill_outputs(f) + } + } +} + /// A potentially incomplete request. pub trait IncompleteRequest: Sized { type Complete; @@ -369,7 +480,8 @@ pub mod header { /// The output of a request for headers. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Response { - header: Vec, + /// The headers requested. + pub headers: Vec, } impl Response { @@ -523,7 +635,6 @@ pub mod block_receipts { hash: hash, }) } - } /// A complete block receipts request. From b396b56e34e641e6e957ce573f5fdbdce9764079 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 17:18:26 +0100 Subject: [PATCH 18/91] encode and decode responses --- ethcore/light/src/provider.rs | 22 +- ethcore/light/src/types/request.rs | 491 +++++++++++++++++++++++++---- 2 files changed, 439 insertions(+), 74 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 4e43296ab..4a9a96999 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -30,16 +30,9 @@ use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; use transaction_queue::TransactionQueue; - use request; -/// Defines the operations that a provider for `LES` must fulfill. -/// -/// These are defined at [1], but may be subject to change. -/// Requests which can't be fulfilled should return either an empty RLP list -/// or empty vector where appropriate. -/// -/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) +/// Defines the operations that a provider for the light subprotocol must fulfill. 
#[cfg_attr(feature = "ipc", ipc(client_ident="LightProviderClient"))] pub trait Provider: Send + Sync { /// Provide current blockchain info. @@ -149,12 +142,12 @@ impl Provider for T { BlockChainClient::block_header(self, id) } - fn block_body(&self, req: request::CompleteBodyRequest) -> Option; + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { BlockChainClient::block_body(self, id) .map(|body| ::request::BodyResponse { body: body }) } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { BlockChainClient::block_receipts(self, hash) .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } @@ -168,7 +161,7 @@ impl Provider for T { code_hash: acc.code_hash, storage_root: acc.storage_root, } - })) + }) } fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { @@ -177,7 +170,7 @@ impl Provider for T { proof: proof, value: item, } - })) + }) } fn contract_code(&self, req: request::ContractCode) -> Option { @@ -185,7 +178,7 @@ impl Provider for T { .map(|code| ::request::CodeResponse { code: code }) } - fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { let cht_number = match cht::block_to_cht_number(req.num) { Some(cht_num) => cht_num, None => { @@ -243,7 +236,7 @@ impl Provider for T { } } - fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { use ethcore::transaction::Transaction; let id = BlockId::Hash(req.at); @@ -261,6 +254,7 @@ impl Provider for T { }.fake_sign(req.from); self.prove_transaction(transaction, id) + .map(|proof| ::request::ExecutionResponse { items: proof }) } fn ready_transactions(&self) -> Vec { diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 
2b23d0380..7ad16ea4d 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -38,9 +38,9 @@ pub use self::block_body::{ Incomplete as IncompleteBodyRequest, Response as BodyResponse }; -pub use self::receipts::{ +pub use self::block_receipts::{ Complete as CompleteReceiptsRequest, - Incomplete as IncompleteReceiptsRequest + Incomplete as IncompleteReceiptsRequest, Response as ReceiptsResponse }; pub use self::account::{ @@ -58,6 +58,11 @@ pub use self::contract_code::{ Incomplete as IncompleteCodeRequest, Response as CodeResponse, }; +pub use self::execution::{ + Complete as CompleteExecutionRequest, + Incomplete as IncompleteExecutionRequest, + Response as ExecutionResponse, +}; /// Error indicating a reference to a non-existent or wrongly-typed output. pub struct NoSuchOutput; @@ -87,7 +92,7 @@ impl Decodable for Field { 1 => Ok({ let inner_rlp = rlp.at(1)?; Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) - }) + }), _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), } } @@ -171,7 +176,8 @@ pub enum Request { Storage(IncompleteStorageRequest), /// A request for contract code. Code(IncompleteCodeRequest), - // Transaction proof. + /// A request for proof of execution, + Execution(IncompleteExecutionRequest), } /// All request types, as they're sent over the network. @@ -192,19 +198,21 @@ pub enum CompleteRequest { Storage(CompleteStorageRequest), /// A request for contract code. Code(CompleteCodeRequest), - // Transaction proof. 
+ /// A request for proof of execution, + Execution(CompleteExecutionRequest), } impl Request { - fn kind(&self) -> RequestKind { + fn kind(&self) -> Kind { match *self { - Request::Headers(_) => RequestKind::Headers, - Request::HeaderProof(_) => RequestKind::HeaderProof, - Request::Receipts(_) => RequestKind::Receipts, - Request::Body(_) => RequestKind::Body, - Request::Account(_) => RequestKind::Account, - Request::Storage(_) => RequestKind::Storage, - Request::Code(_) => RequestKind::Code, + Request::Headers(_) => Kind::Headers, + Request::HeaderProof(_) => Kind::HeaderProof, + Request::Receipts(_) => Kind::Receipts, + Request::Body(_) => Kind::Body, + Request::Account(_) => Kind::Account, + Request::Storage(_) => Kind::Storage, + Request::Code(_) => Kind::Code, + Request::Execution(_) => Kind::Execution, } } } @@ -213,14 +221,15 @@ impl Decodable for Request { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); - match rlp.val_at::(0)? { - RequestKind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), - RequestKind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), - RequestKind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), - RequestKind::Body => Ok(Request::Body(rlp.val_at(1)?)), - RequestKind::Account => Ok(Request::Account(rlp.val_at(1)?)), - RequestKind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), - RequestKind::Code => Ok(Request::Code(rlp.val_at(1)?)), + match rlp.val_at::(0)? 
{ + Kind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Request::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Request::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), } } } @@ -237,6 +246,7 @@ impl Encodable for Request { Request::Account(ref req) => s.append(req), Request::Storage(ref req) => s.append(req), Request::Code(ref req) => s.append(req), + Request::Execution(ref req) => s.append(req), }; } } @@ -255,6 +265,7 @@ impl IncompleteRequest for Request { Request::Account(ref req) => req.check_outputs(f), Request::Storage(ref req) => req.check_outputs(f), Request::Code(ref req) => req.check_outputs(f), + Request::Execution(ref req) => req.check_outputs(f), } } @@ -267,29 +278,31 @@ impl IncompleteRequest for Request { Request::Account(ref req) => req.note_outputs(f), Request::Storage(ref req) => req.note_outputs(f), Request::Code(ref req) => req.note_outputs(f), + Request::Execution(ref req) => req.note_outputs(f), } } fn fill(self, oracle: F) -> Result where F: Fn(usize, usize) -> Result { - match self { - Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)), - Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)), - Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)), - Request::Body(req) => CompleteRequest::Body(req.fill(oracle)), - Request::Account(req) => CompleteRequest::Account(req.fill(oracle)), - Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)), - Request::Code(req) => CompleteRequest::Code(req.fill(oracle)), - } + Ok(match self { + Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)?), + Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)?), + 
Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)?), + Request::Body(req) => CompleteRequest::Body(req.fill(oracle)?), + Request::Account(req) => CompleteRequest::Account(req.fill(oracle)?), + Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)?), + Request::Code(req) => CompleteRequest::Code(req.fill(oracle)?), + Request::Execution(req) => CompleteRequest::Execution(req.fill(oracle)?), + }) } } /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum RequestKind { +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Kind { /// A request for headers. Headers = 0, HeaderProof = 1, @@ -299,29 +312,29 @@ pub enum RequestKind { Account = 5, Storage = 6, Code = 7, - // TransactionProof = 8, + Execution = 8, } -impl Decodable for RequestKind { +impl Decodable for Kind { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); match rlp.as_val::()? { - 0 => Ok(RequestKind::Headers), - 1 => Ok(RequestKind::HeaderProof), - // 2 => Ok(RequestKind::TransactionIndex, - 3 => Ok(RequestKind::Receipts), - 4 => Ok(RequestKind::Body), - 5 => Ok(RequestKind::Account), - 6 => Ok(RequestKind::Storage), - 7 => Ok(RequestKind::Code), - // 8 => Ok(RequestKind::TransactionProof), + 0 => Ok(Kind::Headers), + 1 => Ok(Kind::HeaderProof), + // 2 => Ok(Kind::TransactionIndex, + 3 => Ok(Kind::Receipts), + 4 => Ok(Kind::Body), + 5 => Ok(Kind::Account), + 6 => Ok(Kind::Storage), + 7 => Ok(Kind::Code), + 8 => Ok(Kind::Execution), _ => Err(DecoderError::Custom("Unknown PIP request ID.")), } } } -impl Encodable for RequestKind { +impl Encodable for Kind { fn rlp_append(&self, s: &mut RlpStream) { s.append(self as &u8); } @@ -345,22 +358,71 @@ pub enum Response { Storage(StorageResponse), /// A response for contract code. Code(CodeResponse), - // Transaction proof. 
+ /// A response for proof of execution, + Execution(ExecutionResponse), } impl Response { /// Fill reusable outputs by writing them into the function. pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { match *self { - Response::Headers(res) => res.fill_outputs(f) - Response::HeaderProof(res) => res.fill_outputs(f) - Response::Receipts(res) => res.fill_outputs(f) - Response::Body(res) => res.fill_outputs(f) - Response::Account(res) => res.fill_outputs(f) - Response::Storage(res) => res.fill_outputs(f) - Response::Code(res) => res.fill_outputs(f) + Response::Headers(res) => res.fill_outputs(f), + Response::HeaderProof(res) => res.fill_outputs(f), + Response::Receipts(res) => res.fill_outputs(f), + Response::Body(res) => res.fill_outputs(f), + Response::Account(res) => res.fill_outputs(f), + Response::Storage(res) => res.fill_outputs(f), + Response::Code(res) => res.fill_outputs(f), + Response::Execution(res) => res.fill_outputs(f), } } + + fn kind(&self) -> Kind { + match *self { + Response::Headers(_) => Kind::Headers, + Response::HeaderProof(_) => Kind::HeaderProof, + Response::Receipts(_) => Kind::Receipts, + Response::Body(_) => Kind::Body, + Response::Account(_) => Kind::Account, + Response::Storage(_) => Kind::Storage, + Response::Code(_) => Kind::Code, + Respnse::Execution(_) => Kind::Execution, + } + } +} + +impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? 
{ + Kind::Headers => Ok(Response::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Response::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Response::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Response::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), + Kind::Execution=> Ok(Response::Execution(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).append(&self.kind()); + + match *self { + Response::Headers(ref res) => s.append(res), + Response::HeaderProof(ref res) => s.append(res), + Response::Receipts(ref res) => s.append(res), + Response::Body(ref res) => s.append(res), + Response::Account(ref res) => s.append(res), + Response::Storage(ref res) => s.append(res), + Response::Code(ref res) => s.append(res), + Response::Execution(ref res) => s.append(res), + }; + } } /// A potentially incomplete request. @@ -390,7 +452,7 @@ pub trait IncompleteRequest: Sized { pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::U256; /// Potentially incomplete headers request. @@ -488,12 +550,41 @@ pub mod header { /// Fill reusable outputs by writing them into the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + use ethcore::header::Header as FullHeader; + let rlp = decoder.as_rlp(); + + let mut headers = Vec::new(); + + for item in rlp.at(0)?.iter() { + // check that it's a valid encoding. + // TODO: just return full headers here? 
+ let _: FullHeader = item.as_val()?; + headers.push(encoded::Header::new(item.as_raw().to_owned())); + } + + Ok(Response { + headers: headers, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.headers.len()); + for header in &self.headers { + s.append_raw(header.rlp().as_raw(), 1); + } + } + } } /// Request and response for header proofs. pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete header proof request. @@ -576,12 +667,34 @@ pub mod header_proof { f(1, Output::Hash(self.hash)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + hash: rlp.val_at(1)?, + td: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.proof) + .append(&self.hash) + .append(&self.td); + } + } } /// Request and response for block receipts pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use ethcore::receipt::Receipt; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete block receipts request. @@ -655,13 +768,29 @@ pub mod block_receipts { /// Fill reusable outputs by providing them to the function. 
pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + receipts: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.receipts); + } + } } /// Request and response for a block body pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete block body request. @@ -736,13 +865,38 @@ pub mod block_body { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + use ethcore::header::Header as FullHeader; + use ethcore::transaction::SignedTransaction; + + let rlp = decoder.as_rlp(); + let body_rlp = rlp.at(0)?; + + // check body validity. + let _: Vec = rlp.val_at(0)?; + let _: Vec = rlp.val_at(1)?; + + Ok(Response { + body: encoded::Body::new(body_rlp.as_raw().to_owned()), + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append_raw(&self.body.rlp().as_raw(), 2); + } + } } /// A request for an account proof. pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an account proof. 
@@ -852,13 +1006,38 @@ pub mod account { f(1, Output::Hash(self.storage_root)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + balance: rlp.val_at(2)?, + code_hash: rlp.val_at(3)?, + storage_root: rlp.val_at(4)? + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5) + .append(&self.proof) + .append(&self.nonce) + .append(&self.balance) + .append(&self.code_hash) + .append(&self.storage_root) + } + } } /// A request for a storage proof. pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an storage proof. @@ -979,16 +1158,35 @@ pub mod storage { f(0, Output::Hash(self.value)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + value: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.proof) + .append(&self.value); + } + } } /// A request for contract code. pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; - /// Potentially incomplete _ request. + /// Potentially incomplete contract code request. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Incomplete { /// The block hash to request the state for. 
@@ -1080,4 +1278,177 @@ pub mod contract_code { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + code: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.code); + } + } +} + +/// A request for proof of execution. +pub mod execution { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use ethcore::transaction::Action; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; + use util::{Bytes, Address, U256, H256, DBValue}; + + /// Potentially incomplete execution proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. 
+ pub data: Bytes, + } + + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address: rlp.val_at(1)?, + action: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + gas_price: rlp.val_at(4)?, + value: rlp.val_at(5)?, + data: rlp.val_at(6)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(7) + .append(&self.block_hash) + .append(&self.from); + + match *self.action { + Action::Create => s.append_empty_data(), + Action::Call(ref addr) => s.append(addr), + }; + + s.append(&self.gas) + .append(&self.gas_price) + .append(&self.value) + .append(&self.data); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + from: self.from, + action: self.action, + gas: self.gas, + gas_price: self.gas_price, + value: self.value, + data: self.data, + }) + } + + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. 
+ pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } + + /// The output of a request for proof of execution + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// All state items (trie nodes, code) necessary to re-prove the transaction. + pub items: Vec, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + let mut items = Vec::new(); + for raw_item in rlp.at(0)?.iter() { + let mut item = DBValue::new(); + item.append_slice(raw_item.data()); + items.push(item); + } + + Ok(Response { + items: items, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(&self.items.len()); + + for item in &self.items { + s.append(&&**item); + } + } + } } From 04291fe71e37fd8679f8a76ce853cadcbc90f844 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 19:48:07 +0100 Subject: [PATCH 19/91] complete initial request changes --- ethcore/light/Cargo.toml | 2 +- ethcore/light/src/lib.rs | 3 +- ethcore/light/src/net/context.rs | 14 +- ethcore/light/src/net/mod.rs | 805 ++--------------------- ethcore/light/src/net/request_credits.rs | 166 ++--- ethcore/light/src/net/request_set.rs | 17 +- ethcore/light/src/provider.rs | 14 +- ethcore/light/src/request_builder.rs | 116 ++++ ethcore/light/src/types/request.rs | 97 +-- 9 files changed, 273 insertions(+), 961 deletions(-) create mode 100644 ethcore/light/src/request_builder.rs diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 9e10449fb..cab75a36a 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "Parity LES primitives" +description = "Parity Light Client Implementation" homepage = "http://parity.io" 
license = "GPL-3.0" name = "ethcore-light" diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ada58d8de..b15c85242 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -35,9 +35,10 @@ pub mod client; pub mod cht; pub mod net; -pub mod on_demand; +//pub mod on_demand; pub mod transaction_queue; pub mod cache; +pub mod request_builder; #[cfg(not(feature = "ipc"))] pub mod provider; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index bd0c8a6bb..332d497a1 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -12,7 +12,7 @@ // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// along with Parity. If not, see . //! I/O and event context generalizations. @@ -89,10 +89,6 @@ pub trait BasicContext { // TODO: maybe just put this on a timer in LightProtocol? fn make_announcement(&self, announcement: Announcement); - /// Find the maximum number of requests of a specific type which can be made from - /// supplied peer. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize; - /// Disconnect a peer. 
fn disconnect_peer(&self, peer: PeerId); @@ -131,10 +127,6 @@ impl<'a> BasicContext for TickCtx<'a> { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } @@ -168,10 +160,6 @@ impl<'a> BasicContext for Ctx<'a> { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 181f95e95..1b2433fbe 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! LES Protocol Version 1 implementation. +//! PIP Protocol Version 1 implementation. //! //! This uses a "Provider" to answer requests. -//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) use ethcore::transaction::{Action, UnverifiedTransaction}; use ethcore::receipt::Receipt; @@ -35,7 +34,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{self, HashOrNumber, Request}; +use request::{self, HashOrNumber, Request, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -83,43 +82,24 @@ mod packet { // announcement of new block hashes or capabilities. pub const ANNOUNCE: u8 = 0x01; - // request and response for block headers - pub const GET_BLOCK_HEADERS: u8 = 0x02; - pub const BLOCK_HEADERS: u8 = 0x03; - - // request and response for block bodies - pub const GET_BLOCK_BODIES: u8 = 0x04; - pub const BLOCK_BODIES: u8 = 0x05; - - // request and response for transaction receipts. 
- pub const GET_RECEIPTS: u8 = 0x06; - pub const RECEIPTS: u8 = 0x07; - - // request and response for merkle proofs. - pub const GET_PROOFS: u8 = 0x08; - pub const PROOFS: u8 = 0x09; - - // request and response for contract code. - pub const GET_CONTRACT_CODES: u8 = 0x0a; - pub const CONTRACT_CODES: u8 = 0x0b; + // request and response. + pub const REQUEST: u8 = 0x02; + pub const RESPONSE: u8 = 0x03; // relay transactions to peers. - pub const SEND_TRANSACTIONS: u8 = 0x0c; - - // request and response for header proofs in a CHT. - pub const GET_HEADER_PROOFS: u8 = 0x0d; - pub const HEADER_PROOFS: u8 = 0x0e; + pub const SEND_TRANSACTIONS: u8 = 0x04; // request and response for transaction proof. - pub const GET_TRANSACTION_PROOF: u8 = 0x0f; - pub const TRANSACTION_PROOF: u8 = 0x10; + // TODO: merge with request/response. + pub const GET_TRANSACTION_PROOF: u8 = 0x05; + pub const TRANSACTION_PROOF: u8 = 0x06; } // timeouts for different kinds of requests. all values are in milliseconds. // TODO: variable timeouts based on request count. mod timeout { pub const HANDSHAKE: i64 = 2500; - pub const HEADERS: i64 = 5000; + pub const HEADERS: i64 = 2500; pub const BODIES: i64 = 5000; pub const RECEIPTS: i64 = 3500; pub const PROOFS: i64 = 4000; @@ -159,17 +139,6 @@ pub struct Peer { } impl Peer { - // check the maximum cost of a request, returning an error if there's - // not enough credits left. - // returns the calculated maximum cost. - fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result { - flow_params.recharge(&mut self.local_credits); - - let max_cost = flow_params.compute_cost(kind, max); - self.local_credits.deduct_cost(max_cost)?; - Ok(max_cost) - } - // refund credits for a request. returns new amount of credits. 
fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { flow_params.refund(&mut self.local_credits, amount); @@ -197,20 +166,8 @@ pub trait Handler: Send + Sync { fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } /// Called when a peer requests relay of some transactions. fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } - /// Called when a peer responds with block bodies. - fn on_block_bodies(&self, _ctx: &EventContext, _req_id: ReqId, _bodies: &[Bytes]) { } - /// Called when a peer responds with block headers. - fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, _headers: &[Bytes]) { } - /// Called when a peer responds with block receipts. - fn on_receipts(&self, _ctx: &EventContext, _req_id: ReqId, _receipts: &[Vec]) { } - /// Called when a peer responds with state proofs. Each proof should be a series of trie - /// nodes in ascending order by distance from the root. - fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec]) { } - /// Called when a peer responds with contract code. - fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { } - /// Called when a peer responds with header proofs. Each proof should be a block header coupled - /// with a series of trie nodes is ascending order by distance from the root. - fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called when a peer responds to requests. + fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _relay: &[Response]) { } /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. @@ -307,7 +264,7 @@ pub struct LightProtocol { impl LightProtocol { /// Create a new instance of the protocol manager. 
pub fn new(provider: Arc, params: Params) -> Self { - debug!(target: "les", "Initializing LES handler"); + debug!(target: "pip", "Initializing light protocol handler"); let genesis_hash = provider.chain_info().genesis_hash; LightProtocol { @@ -339,62 +296,15 @@ impl LightProtocol { ) } - /// Check the maximum amount of requests of a specific type - /// which a peer would be able to serve. Returns zero if the - /// peer is unknown or has no credit parameters. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.peers.read().get(&peer).and_then(|peer| { - let mut peer = peer.lock(); - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - Some(flow.max_amount(&*c, kind)) - } - None => None, - } - }).unwrap_or(0) - } - /// Make a request to a peer. /// /// Fails on: nonexistent peer, network error, peer not server, /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. + // TODO: pass `Requests`. 
pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { - let peers = self.peers.read(); - let peer = peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)?; - let mut peer = peer.lock(); - - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - let max = flow.compute_cost(request.kind(), request.amount()); - c.deduct_cost(max)?; - } - None => return Err(Error::NotServer), - } - - let req_id = self.req_id.fetch_add(1, Ordering::SeqCst); - let packet_data = encode_request(&request, req_id); - - trace!(target: "les", "Dispatching request {} to peer {}", req_id, peer_id); - - let packet_id = match request.kind() { - request::Kind::Headers => packet::GET_BLOCK_HEADERS, - request::Kind::Bodies => packet::GET_BLOCK_BODIES, - request::Kind::Receipts => packet::GET_RECEIPTS, - request::Kind::StateProofs => packet::GET_PROOFS, - request::Kind::Codes => packet::GET_CONTRACT_CODES, - request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, - request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF, - }; - - io.send(*peer_id, packet_id, packet_data); - - peer.pending_requests.insert(ReqId(req_id), request, SteadyTime::now()); - - Ok(ReqId(req_id)) + unimplemented!() } /// Make an announcement of new chain head and capabilities to all peers. 
@@ -427,7 +337,7 @@ impl LightProtocol { None => { // both values will always originate locally -- this means something // has gone really wrong - debug!(target: "les", "couldn't compute reorganization depth between {:?} and {:?}", + debug!(target: "pip", "couldn't compute reorganization depth between {:?} and {:?}", &announcement.head_hash, &peer_info.sent_head); 0 } @@ -474,11 +384,10 @@ impl LightProtocol { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); + trace!(target: "pip", "pre-verifying response from peer {}, kind={:?}", peer, kind); - let mut had_req = false; let peers = self.peers.read(); - let maybe_err = match peers.get(peer) { + let res = match peers.get(peer) { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); @@ -486,69 +395,37 @@ impl LightProtocol { match (req_info, flow_info) { (Some(request), Some(flow_info)) => { - had_req = true; - let &mut (ref mut c, ref mut flow) = flow_info; let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); - if request.kind() != kind { - Some(Error::UnsolicitedResponse) - } else { - None - } + Ok(()) } - (None, _) => Some(Error::UnsolicitedResponse), - (_, None) => Some(Error::NotServer), // really should be impossible. + (None, _) => Err(Error::UnsolicitedResponse), + (_, None) => Err(Error::NotServer), // really should be impossible. } } - None => Some(Error::UnknownPeer), // probably only occurs in a race of some kind. + None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. 
}; - if had_req { - let id_guard = IdGuard::new(peers, *peer, req_id); - match maybe_err { - Some(err) => Err(err), - None => Ok(id_guard) - } - } else { - Err(maybe_err.expect("every branch without a request leads to error; qed")) - } + res.map(|_| IdGuard::new(peers, *peer, req_id)) } - /// Handle an LES packet using the given io context. + /// Handle a packet using the given io context. /// Packet data is _untrusted_, which means that invalid data won't lead to /// issues. pub fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) { let rlp = UntrustedRlp::new(data); - trace!(target: "les", "Incoming packet {} from peer {}", packet_id, peer); + trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); // handle the packet let res = match packet_id { packet::STATUS => self.status(peer, io, rlp), packet::ANNOUNCE => self.announcement(peer, io, rlp), - packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), - packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), - - packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), - packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), - - packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), - packet::RECEIPTS => self.receipts(peer, io, rlp), - - packet::GET_PROOFS => self.get_proofs(peer, io, rlp), - packet::PROOFS => self.proofs(peer, io, rlp), - - packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), - packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), - - packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), - packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), - - packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp), - packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp), + packet::REQUEST => self.request(peer, io, rlp), + packet::RESPONSE => self.response(peer, io, rlp), packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), @@ -577,7 +454,7 
@@ impl LightProtocol { .collect(); for slowpoke in slowpokes { - debug!(target: "les", "Peer {} handshake timed out", slowpoke); + debug!(target: "pip", "Peer {} handshake timed out", slowpoke); pending.remove(&slowpoke); io.disconnect_peer(slowpoke); } @@ -587,7 +464,7 @@ impl LightProtocol { { for (peer_id, peer) in self.peers.read().iter() { if peer.lock().pending_requests.check_timeout(now) { - debug!(target: "les", "Peer {} request timeout", peer_id); + debug!(target: "pip", "Peer {} request timeout", peer_id); io.disconnect_peer(*peer_id); } } @@ -631,7 +508,7 @@ impl LightProtocol { /// called when a peer disconnects. pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { - trace!(target: "les", "Peer {} disconnecting", peer); + trace!(target: "pip", "Peer {} disconnecting", peer); self.pending_peers.write().remove(&peer); let unfulfilled = match self.peers.write().remove(&peer) { @@ -686,7 +563,7 @@ impl LightProtocol { let (status, capabilities, flow_params) = status::parse_handshake(data)?; - trace!(target: "les", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); + trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { return Err(Error::WrongNetwork); @@ -723,7 +600,7 @@ impl LightProtocol { // Handle an announcement. fn announcement(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { if !self.peers.read().contains_key(peer) { - debug!(target: "les", "Ignoring announcement from unknown peer"); + debug!(target: "pip", "Ignoring announcement from unknown peer"); return Ok(()) } @@ -765,447 +642,19 @@ impl LightProtocol { Ok(()) } - // Handle a request for block headers. 
- fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_HEADERS: usize = 512; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - let data = data.at(1)?; - - let start_block = { - if data.at(0)?.size() == 32 { - HashOrNumber::Hash(data.val_at(0)?) - } else { - HashOrNumber::Number(data.val_at(0)?) - } - }; - - let req = request::Headers { - start: start_block, - max: ::std::cmp::min(MAX_HEADERS, data.val_at(1)?), - skip: data.val_at(2)?, - reverse: data.val_at(3)?, - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Headers, req.max)?; - - let response = self.provider.block_headers(req); - let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - io.respond(packet::BLOCK_HEADERS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for header in response { - stream.append_raw(&header.into_inner(), 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block headers. - fn block_headers(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Headers, &raw)?; - let raw_headers: Vec<_> = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_headers(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_headers); - } - - Ok(()) - } - - // Handle a request for block bodies. 
- fn get_block_bodies(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_BODIES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Bodies { - block_hashes: data.at(1)?.iter() - .take(MAX_BODIES) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Bodies, req.block_hashes.len())?; - - let response = self.provider.block_bodies(req); - let response_len = response.iter().filter(|x| x.is_some()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::BLOCK_BODIES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for body in response { - match body { - Some(body) => stream.append_raw(&body.into_inner(), 1), - None => stream.append_empty_data(), - }; - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block bodies. - fn block_bodies(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Bodies, &raw)?; - let raw_bodies: Vec = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_bodies(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_bodies); - } - - Ok(()) - } - - // Handle a request for receipts. 
- fn get_receipts(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_RECEIPTS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Receipts { - block_hashes: data.at(1)?.iter() - .take(MAX_RECEIPTS) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Receipts, req.block_hashes.len())?; - - let response = self.provider.receipts(req); - let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::RECEIPTS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for receipts in response { - stream.append_raw(&receipts, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for receipts. - fn receipts(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Receipts, &raw)?; - let raw_receipts: Vec> = raw.at(2)? - .iter() - .map(|x| x.as_val()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_receipts(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_receipts); - } - - Ok(()) - } - - // Handle a request for proofs. 
- fn get_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 128; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::StateProof { - block: x.val_at(0)?, - key1: x.val_at(1)?, - key2: if x.at(2)?.is_empty() { None } else { Some(x.val_at(2)?) }, - from_level: x.val_at(3)?, - }) - }).collect(); - - request::StateProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::StateProofs, req.requests.len())?; - - let response = self.provider.proofs(req); - let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for proofs. 
- fn proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::StateProofs, &raw)?; - - let raw_proofs: Vec> = raw.at(2)?.iter() - .map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect()) - .collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_state_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Handle a request for contract code. - fn get_contract_code(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_CODES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_CODES).map(|x| { - Ok(request::ContractCode { - block_hash: x.val_at(0)?, - account_key: x.val_at(1)?, - }) - }).collect(); - - request::ContractCodes { - code_requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Codes, req.code_requests.len())?; - - let response = self.provider.contract_codes(req); - let response_len = response.iter().filter(|x| !x.is_empty()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::CONTRACT_CODES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for code in response { - stream.append(&code); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for contract code. 
- fn contract_code(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Codes, &raw)?; - - let raw_code: Vec = raw.at(2)?.iter() - .map(|x| x.as_val()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_code(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_code); - } - - Ok(()) - } - - // Handle a request for header proofs - fn get_header_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::HeaderProof { - cht_number: x.val_at(0)?, - block_number: x.val_at(1)?, - from_level: x.val_at(2)?, - }) - }).collect(); - - request::HeaderProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::HeaderProofs, req.requests.len())?; - - let response = self.provider.header_proofs(req); - let response_len = response.iter().filter(|x| &x[..] 
!= ::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::HEADER_PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for header proofs - fn header_proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - fn decode_res(raw: UntrustedRlp) -> Result<(Bytes, Vec), ::rlp::DecoderError> { - Ok(( - raw.val_at(0)?, - raw.at(1)?.iter().map(|x| x.as_raw().to_owned()).collect(), - )) - } - - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proofs: Vec<_> = raw.at(2)?.iter() - .map(decode_res) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_header_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Receive a request for proof-of-execution. - fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - // refuse to execute more than this amount of gas at once. - // this is appx. the point at which the proof of execution would no longer fit in - // a single Devp2p packet. + fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + // the maximum amount of requests we'll fill in a single packet. + const MAX_REQUESTS: usize = 512; + // the maximum amount of gas we'll prove execution of in a single packet. 
const MAX_GAS: usize = 50_000_000; - use util::Uint; + + use ::request_builder::RequestBuilder; let peers = self.peers.read(); let peer = match peers.get(peer) { Some(peer) => peer, None => { - debug!(target: "les", "Ignoring request from unknown peer"); + debug!(target: "pip", "Ignoring request from unknown peer"); return Ok(()) } }; @@ -1213,68 +662,11 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; - let req = { - let req_rlp = raw.at(1)?; - request::TransactionProof { - at: req_rlp.val_at(0)?, - from: req_rlp.val_at(1)?, - action: if req_rlp.at(2)?.is_empty() { - Action::Create - } else { - Action::Call(req_rlp.val_at(2)?) - }, - gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()), - gas_price: req_rlp.val_at(4)?, - value: req_rlp.val_at(5)?, - data: req_rlp.val_at(6)?, - } - }; - - // always charge the peer for all the gas. - peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?; - - let response = match self.provider.transaction_proof(req) { - Some(res) => res, - None => vec![], - }; - - let cur_credits = peer.local_credits.current(); - - io.respond(packet::TRANSACTION_PROOF, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for state_item in response { - stream.append(&&state_item[..]); - } - - stream.out() - }); - - Ok(()) + unimplemented!() } - // Receive a response for proof-of-execution. 
- fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proof: Vec = raw.at(2)?.iter() - .map(|rlp| { - let mut db_val = DBValue::new(); - db_val.append_slice(rlp.data()?); - Ok(db_val) - }) - .collect::, ::rlp::DecoderError>>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_transaction_proof(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proof); - } - - Ok(()) + fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + unimplemented!() } // Receive a set of transactions to relay. @@ -1286,7 +678,7 @@ impl LightProtocol { .map(|x| x.as_val::()) .collect::>()?; - debug!(target: "les", "Received {} transactions to relay from peer {}", txs.len(), peer); + debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer); for handler in &self.handlers { handler.on_transactions(&Ctx { @@ -1305,11 +697,11 @@ fn punish(peer: PeerId, io: &IoContext, e: Error) { match e.punishment() { Punishment::None => {} Punishment::Disconnect => { - debug!(target: "les", "Disconnecting peer {}: {}", peer, e); + debug!(target: "pip", "Disconnecting peer {}: {}", peer, e); io.disconnect_peer(peer) } Punishment::Disable => { - debug!(target: "les", "Disabling peer {}: {}", peer, e); + debug!(target: "pip", "Disabling peer {}: {}", peer, e); io.disable_peer(peer) } } @@ -1339,112 +731,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), - _ => warn!(target: "les", "received timeout on unknown token {}", timer), - } - } -} - -// Helper for encoding the request to RLP with the given ID. 
-fn encode_request(req: &Request, req_id: usize) -> Vec { - match *req { - Request::Headers(ref headers) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(4); - - match headers.start { - HashOrNumber::Hash(ref hash) => stream.append(hash), - HashOrNumber::Number(ref num) => stream.append(num), - }; - - stream - .append(&headers.max) - .append(&headers.skip) - .append(&headers.reverse); - - stream.out() - } - Request::Bodies(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::Receipts(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::StateProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(4) - .append(&proof_req.block) - .append(&proof_req.key1); - - match proof_req.key2 { - Some(ref key2) => stream.append(key2), - None => stream.append_empty_data(), - }; - - stream.append(&proof_req.from_level); - } - - stream.out() - } - Request::Codes(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.code_requests.len()); - - for code_req in &request.code_requests { - stream.begin_list(2) - .append(&code_req.block_hash) - .append(&code_req.account_key); - } - - stream.out() - } - Request::HeaderProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(3) - .append(&proof_req.cht_number) - .append(&proof_req.block_number) - .append(&proof_req.from_level); - } - - stream.out() - } - 
Request::TransactionProof(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(7) - .append(&request.at) - .append(&request.from); - - match request.action { - Action::Create => stream.append_empty_data(), - Action::Call(ref to) => stream.append(to), - }; - - stream - .append(&request.gas) - .append(&request.gas_price) - .append(&request.value) - .append(&request.data); - - stream.out() + _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 97aa9b431..e3821e05a 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -26,7 +26,7 @@ //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. -use request; +use request::{self, Request}; use super::packet; use super::error::Error; @@ -34,10 +34,6 @@ use rlp::*; use util::U256; use time::{Duration, SteadyTime}; -/// A request cost specification. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Cost(pub U256, pub U256); - /// Credits value. /// /// Produced and recharged using `FlowParams`. @@ -81,93 +77,43 @@ impl Credits { /// A cost table, mapping requests to base and per-request costs. #[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - headers: Cost, // cost per header - bodies: Cost, - receipts: Cost, - state_proofs: Cost, - contract_codes: Cost, - header_proofs: Cost, - transaction_proof: Cost, // cost per gas. + base: U256, // cost per packet. + headers: U256, // cost per header + body: U256, + receipts: U256, + account: U256, + storage: U256, + code: U256, + header_proof: U256, + transaction_proof: U256, // cost per gas. } impl Default for CostTable { fn default() -> Self { // arbitrarily chosen constants. 
CostTable { - headers: Cost(100000.into(), 10000.into()), - bodies: Cost(150000.into(), 15000.into()), - receipts: Cost(50000.into(), 5000.into()), - state_proofs: Cost(250000.into(), 25000.into()), - contract_codes: Cost(200000.into(), 20000.into()), - header_proofs: Cost(150000.into(), 15000.into()), - transaction_proof: Cost(100000.into(), 2.into()), + base: 100000.into(), + headers: 10000.into(), + body: 15000.into(), + receipts: 5000.into(), + account: 25000.into(), + storage: 25000.into(), + code: 20000.into(), + header_proof: 15000.into(), + transaction_proof: 2.into(), } } } impl RlpEncodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - fn append_cost(s: &mut RlpStream, msg_id: u8, cost: &Cost) { - s.begin_list(3) - .append(&msg_id) - .append(&cost.0) - .append(&cost.1); - } - - s.begin_list(7); - - append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); - append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); - append_cost(s, packet::GET_RECEIPTS, &self.receipts); - append_cost(s, packet::GET_PROOFS, &self.state_proofs); - append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); - append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); - append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof); + unimplemented!() } } impl RlpDecodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { - let rlp = decoder.as_rlp(); - - let mut headers = None; - let mut bodies = None; - let mut receipts = None; - let mut state_proofs = None; - let mut contract_codes = None; - let mut header_proofs = None; - let mut transaction_proof = None; - - for row in rlp.iter() { - let msg_id: u8 = row.val_at(0)?; - let cost = { - let base = row.val_at(1)?; - let per = row.val_at(2)?; - - Cost(base, per) - }; - - match msg_id { - packet::GET_BLOCK_HEADERS => headers = Some(cost), - packet::GET_BLOCK_BODIES => bodies = Some(cost), - packet::GET_RECEIPTS => receipts = Some(cost), - packet::GET_PROOFS => state_proofs = 
Some(cost), - packet::GET_CONTRACT_CODES => contract_codes = Some(cost), - packet::GET_HEADER_PROOFS => header_proofs = Some(cost), - packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost), - _ => return Err(DecoderError::Custom("Unrecognized message in cost table")), - } - } - - Ok(CostTable { - headers: headers.ok_or(DecoderError::Custom("No headers cost specified"))?, - bodies: bodies.ok_or(DecoderError::Custom("No bodies cost specified"))?, - receipts: receipts.ok_or(DecoderError::Custom("No receipts cost specified"))?, - state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?, - contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?, - header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?, - transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?, - }) + unimplemented!() } } @@ -192,17 +138,19 @@ impl FlowParams { /// Create effectively infinite flow params. pub fn free() -> Self { - let free_cost = Cost(0.into(), 0.into()); + let free_cost: U256 = 0.into(); FlowParams { limit: (!0u64).into(), recharge: 1.into(), costs: CostTable { + base: free_cost.clone(), headers: free_cost.clone(), - bodies: free_cost.clone(), + body: free_cost.clone(), receipts: free_cost.clone(), - state_proofs: free_cost.clone(), - contract_codes: free_cost.clone(), - header_proofs: free_cost.clone(), + account: free_cost.clone(), + storage: free_cost.clone(), + code: free_cost.clone(), + header_proof: free_cost.clone(), transaction_proof: free_cost, } } @@ -219,56 +167,20 @@ impl FlowParams { /// Compute the actual cost of a request, given the kind of request /// and number of requests made. 
- pub fn compute_cost(&self, kind: request::Kind, amount: usize) -> U256 { - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let amount: U256 = amount.into(); - cost.0 + (amount * cost.1) - } - - /// Compute the maximum number of costs of a specific kind which can be made - /// with the given amount of credits - /// Saturates at `usize::max()`. This is not a problem in practice because - /// this amount of requests is already prohibitively large. - pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize { - use util::Uint; - use std::usize; - - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let start = credits.current(); - - if start <= cost.0 { - return 0; - } else if cost.1 == U256::zero() { - return usize::MAX; - } - - let max = (start - cost.0) / cost.1; - if max >= usize::MAX.into() { - usize::MAX - } else { - max.as_u64() as usize + pub fn compute_cost(&self, request: &Request) -> U256 { + match *request { + Request::Headers(ref req) => self.costs.headers * req.max.into(), + Request::HeaderProof(_) => self.costs.header_proof, + Request::Body(_) => self.costs.body, + Request::Receipts(_) => self.costs.receipts, + Request::Account(_) => self.costs.account, + Request::Storage(_) => self.costs.storage, + 
Request::Code(_) => self.costs.code, + Request::Execution(ref req) => self.costs.transaction_proof * req.gas, } } - /// Create initial credits.. + /// Create initial credits. pub fn create_credits(&self) -> Credits { Credits { estimate: self.limit, diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index e6d4068da..c329d780f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -89,22 +89,7 @@ impl RequestSet { None => return false, }; - let kind = self.reqs.values() - .next() - .map(|r| r.kind()) - .expect("base time implies `reqs` non-empty; qed"); - - let kind_timeout = match kind { - request::Kind::Headers => timeout::HEADERS, - request::Kind::Bodies => timeout::BODIES, - request::Kind::Receipts => timeout::RECEIPTS, - request::Kind::StateProofs => timeout::PROOFS, - request::Kind::Codes => timeout::CONTRACT_CODES, - request::Kind::HeaderProofs => timeout::HEADER_PROOFS, - request::Kind::TransactionProof => timeout::TRANSACTION_PROOF, - }; - - base + Duration::milliseconds(kind_timeout) <= now + unimplemented!() } /// Collect all pending request ids. diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 4a9a96999..be9239e4d 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -121,7 +121,7 @@ pub trait Provider: Send + Sync { /// Provide a proof-of-execution for the given transaction proof request. /// Returns a vector of all state items necessary to execute the transaction. - fn transaction_proof(&self, req: request::TransactionProof) -> Option>; + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option; } // Implementation of a light client data provider for a client. 
@@ -143,12 +143,12 @@ impl Provider for T { } fn block_body(&self, req: request::CompleteBodyRequest) -> Option { - BlockChainClient::block_body(self, id) + BlockChainClient::block_body(self, BlockId::Hash(req.hash)) .map(|body| ::request::BodyResponse { body: body }) } fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { - BlockChainClient::block_receipts(self, hash) + BlockChainClient::block_receipts(self, &req.hash) .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } @@ -165,7 +165,7 @@ impl Provider for T { } fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { - self.prove_account(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { + self.prove_storage(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { ::request::StorageResponse { proof: proof, value: item, @@ -173,7 +173,7 @@ impl Provider for T { }) } - fn contract_code(&self, req: request::ContractCode) -> Option { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { self.state_data(&req.code_hash) .map(|code| ::request::CodeResponse { code: code }) } @@ -239,7 +239,7 @@ impl Provider for T { fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { use ethcore::transaction::Transaction; - let id = BlockId::Hash(req.at); + let id = BlockId::Hash(req.block_hash); let nonce = match self.nonce(&req.from, id.clone()) { Some(nonce) => nonce, None => return None, @@ -321,7 +321,7 @@ impl Provider for LightProvider { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { None } diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs new file mode 100644 index 000000000..6233075bb --- /dev/null +++ b/ethcore/light/src/request_builder.rs @@ -0,0 +1,116 @@ +// Copyright 2015-2017 Parity Technologies (UK) 
Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Request chain builder utility. +//! Push requests with `push`. Back-references and data required to verify responses must be +//! supplied as well. + +use std::collections::{HashMap, VecDeque}; +use request::{ + IncompleteRequest, CompleteRequest, Request, + Field, OutputKind, Output, NoSuchOutput, Response, +}; + +/// Build chained requests. Push them onto the series with `push`, +/// and produce a `Requests` object with `build`. Outputs are checked for consistency. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct RequestBuilder { + output_kinds: HashMap<(usize, usize), OutputKind>, + requests: Vec, +} + +impl RequestBuilder { + /// Attempt to push a request onto the request chain. Fails if the request + /// references a non-existant output of a prior request. + pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { + request.check_outputs(|req, idx, kind| { + match self.output_kinds.get(&(req, idx)) { + Some(k) if k == &kind => Ok(()), + _ => Err(NoSuchOutput), + } + })?; + let req_idx = self.requests.len(); + request.note_outputs(|idx, kind| { self.output_kinds.insert((req_idx, idx), kind); }); + self.requests.push(request); + Ok(()) + } + + /// Convert this into a "requests" object. 
+ pub fn build(self) -> Requests { + Requests { + output_kinds: self.output_kinds, + outputs: HashMap::new(), + requests: self.requests, + offset: 0, + } + } +} + +/// Requests pending responses. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Requests { + output_kinds: HashMap<(usize, usize), OutputKind>, + outputs: HashMap<(usize, usize), Output>, + requests: Vec, + offset: usize, // offset for splitting. +} + +impl Requests { + /// For each request, produce responses for each. + /// The responses vector produced goes up to the point where the responder + /// first returns `None`, an invalid response, or until all requests have been responded to. + pub fn respond_to_all(mut self, responder: F) -> Vec + where F: Fn(CompleteRequest) -> Option + { + let mut responses = Vec::new(); + let mut found_bad = false; + let offset = self.offset; + let output_kinds = self.output_kinds; + let mut outputs = self.outputs; + for (idx, req) in self.requests.into_iter().enumerate().map(|(idx, req)| (idx + offset, req)) { + let complete = req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .expect("All outputs checked as invariant of `Requests` object; qed"); + + match responder(complete) { + Some(response) => { + response.fill_outputs(|out_idx, output| { + match output_kinds.get(&(idx, out_idx)) { + None => {}, + Some(out) => if out == &output.kind() { + outputs.insert((idx, out_idx), output); + } else { + // output kind doesn't match expected. + found_bad = true; + } + } + }); + + if found_bad { + return responses; + } + + responses.push(response); + } + None => return responses, + } + } + + responses + } + + /// Get access to the underlying slice of requests. 
+ pub fn requests(&self) -> &[Request] { &self.requests } +} diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 7ad16ea4d..a84a37435 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use ethcore::transaction::Action; -use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Address, H256, U256, Uint}; // re-exports of request types. @@ -65,6 +65,7 @@ pub use self::execution::{ }; /// Error indicating a reference to a non-existent or wrongly-typed output. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; /// An input to a request. @@ -77,7 +78,7 @@ pub enum Field { BackReference(usize, usize), } -impl From for Field { +impl From for Field { fn from(val: T) -> Self { Field::Scalar(val) } @@ -119,7 +120,8 @@ pub enum Output { } impl Output { - fn kind(&self) -> OutputKind { + /// Get the output kind. + pub fn kind(&self) -> OutputKind { match *self { Output::Hash(_) => OutputKind::Hash, Output::Number(_) => OutputKind::Number, @@ -158,6 +160,24 @@ impl From for HashOrNumber { } } +impl Decodable for HashOrNumber { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + rlp.val_at::(0).map(HashOrNumber::Hash) + .or_else(|_| rlp.val_at(0).map(HashOrNumber::Number)) + } +} + +impl Encodable for HashOrNumber { + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + HashOrNumber::Hash(ref hash) => s.append(hash), + HashOrNumber::Number(ref num) => s.append(num), + }; + } +} + /// All request types, as they're sent over the network. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { @@ -305,13 +325,20 @@ impl IncompleteRequest for Request { pub enum Kind { /// A request for headers. Headers = 0, + /// A request for a header proof. 
HeaderProof = 1, // TransactionIndex = 2, + /// A request for block receipts. Receipts = 3, + /// A request for a block body. Body = 4, + /// A request for an account + merkle proof. Account = 5, + /// A request for contract storage + merkle proof Storage = 6, + /// A request for contract. Code = 7, + /// A request for transaction execution + state proof. Execution = 8, } @@ -336,7 +363,7 @@ impl Decodable for Kind { impl Encodable for Kind { fn rlp_append(&self, s: &mut RlpStream) { - s.append(self as &u8); + s.append(&(*self as u8)); } } @@ -366,14 +393,14 @@ impl Response { /// Fill reusable outputs by writing them into the function. pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { match *self { - Response::Headers(res) => res.fill_outputs(f), - Response::HeaderProof(res) => res.fill_outputs(f), - Response::Receipts(res) => res.fill_outputs(f), - Response::Body(res) => res.fill_outputs(f), - Response::Account(res) => res.fill_outputs(f), - Response::Storage(res) => res.fill_outputs(f), - Response::Code(res) => res.fill_outputs(f), - Response::Execution(res) => res.fill_outputs(f), + Response::Headers(ref res) => res.fill_outputs(f), + Response::HeaderProof(ref res) => res.fill_outputs(f), + Response::Receipts(ref res) => res.fill_outputs(f), + Response::Body(ref res) => res.fill_outputs(f), + Response::Account(ref res) => res.fill_outputs(f), + Response::Storage(ref res) => res.fill_outputs(f), + Response::Code(ref res) => res.fill_outputs(f), + Response::Execution(ref res) => res.fill_outputs(f), } } @@ -386,7 +413,7 @@ impl Response { Response::Account(_) => Kind::Account, Response::Storage(_) => Kind::Storage, Response::Code(_) => Kind::Code, - Respnse::Execution(_) => Kind::Execution, + Response::Execution(_) => Kind::Execution, } } } @@ -403,7 +430,7 @@ impl Decodable for Response { Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), Kind::Code => 
Ok(Response::Code(rlp.val_at(1)?)), - Kind::Execution=> Ok(Response::Execution(rlp.val_at(1)?)), + Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), } } } @@ -427,6 +454,7 @@ impl Encodable for Response { /// A potentially incomplete request. pub trait IncompleteRequest: Sized { + /// The complete variant of this request. type Complete; /// Check prior outputs against the needed inputs. @@ -453,7 +481,6 @@ pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::U256; /// Potentially incomplete headers request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -461,9 +488,9 @@ pub mod header { /// Start block. pub start: Field, /// Skip between. - pub skip: U256, + pub skip: u64, /// Maximum to return. - pub max: U256, + pub max: u64, /// Whether to reverse from start. pub reverse: bool, } @@ -499,7 +526,7 @@ pub mod header { match self.start { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => - f(req, idx, OutputKind::Hash).or_else(|| f(req, idx, OutputKind::Number)) + f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) } } @@ -532,9 +559,9 @@ pub mod header { /// Start block. pub start: HashOrNumber, /// Skip between. - pub skip: U256, + pub skip: u64, /// Maximum to return. - pub max: U256, + pub max: u64, /// Whether to reverse from start. pub reverse: bool, } @@ -695,7 +722,7 @@ pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::receipt::Receipt; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::H256; /// Potentially incomplete block receipts request. 
#[derive(Debug, Clone, PartialEq, Eq)] @@ -725,7 +752,7 @@ pub mod block_receipts { fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> { - match self.num { + match self.hash { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), } @@ -791,7 +818,7 @@ pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::H256; /// Potentially incomplete block body request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -821,7 +848,7 @@ pub mod block_body { fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> { - match self.num { + match self.hash { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), } @@ -869,14 +896,14 @@ pub mod block_body { impl Decodable for Response { fn decode(decoder: &D) -> Result where D: Decoder { use ethcore::header::Header as FullHeader; - use ethcore::transaction::SignedTransaction; + use ethcore::transaction::UnverifiedTransaction; let rlp = decoder.as_rlp(); let body_rlp = rlp.at(0)?; // check body validity. let _: Vec = rlp.val_at(0)?; - let _: Vec = rlp.val_at(1)?; + let _: Vec = rlp.val_at(1)?; Ok(Response { body: encoded::Body::new(body_rlp.as_raw().to_owned()), @@ -895,7 +922,6 @@ pub mod block_body { /// A request for an account proof. 
pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; @@ -1028,7 +1054,7 @@ pub mod account { .append(&self.nonce) .append(&self.balance) .append(&self.code_hash) - .append(&self.storage_root) + .append(&self.storage_root); } } } @@ -1036,9 +1062,8 @@ pub mod account { /// A request for a storage proof. pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::{Bytes, H256}; /// Potentially incomplete request for an storage proof. #[derive(Debug, Clone, PartialEq, Eq)] @@ -1182,9 +1207,8 @@ pub mod storage { /// A request for contract code. pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::{Bytes, H256}; /// Potentially incomplete contract code request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -1299,7 +1323,6 @@ pub mod contract_code { /// A request for proof of execution. 
pub mod execution { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use ethcore::transaction::Action; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, Address, U256, H256, DBValue}; @@ -1328,7 +1351,7 @@ pub mod execution { let rlp = decoder.as_rlp(); Ok(Incomplete { block_hash: rlp.val_at(0)?, - address: rlp.val_at(1)?, + from: rlp.val_at(1)?, action: rlp.val_at(2)?, gas: rlp.val_at(3)?, gas_price: rlp.val_at(4)?, @@ -1344,7 +1367,7 @@ pub mod execution { .append(&self.block_hash) .append(&self.from); - match *self.action { + match self.action { Action::Create => s.append_empty_data(), Action::Call(ref addr) => s.append(addr), }; @@ -1432,7 +1455,7 @@ pub mod execution { let mut items = Vec::new(); for raw_item in rlp.at(0)?.iter() { let mut item = DBValue::new(); - item.append_slice(raw_item.data()); + item.append_slice(raw_item.data()?); items.push(item); } @@ -1444,7 +1467,7 @@ pub mod execution { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(&self.items.len()); + s.begin_list(self.items.len()); for item in &self.items { s.append(&&**item); From dbd05e6c92dcce035621b75ac2ab954f57ef3fd8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 20:58:23 +0100 Subject: [PATCH 20/91] handle request packet in LightProtocol --- ethcore/light/src/net/context.rs | 4 ++-- ethcore/light/src/net/error.rs | 4 ++++ ethcore/light/src/net/mod.rs | 37 ++++++++++++++++++++++++++++---- 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 332d497a1..80a829962 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -50,13 +50,13 @@ pub trait IoContext { impl<'a> IoContext for NetworkContext<'a> { fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { if let Err(e) = self.send(peer, packet_id, packet_body) { - debug!(target: 
"les", "Error sending packet to peer {}: {}", peer, e); + debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e); } } fn respond(&self, packet_id: u8, packet_body: Vec) { if let Err(e) = self.respond(packet_id, packet_body) { - debug!(target: "les", "Error responding to peer message: {}", e); + debug!(target: "pip", "Error responding to peer message: {}", e); } } diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index dda78e0b6..1c0374c7e 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -56,6 +56,8 @@ pub enum Error { UnknownPeer, /// Unsolicited response. UnsolicitedResponse, + /// Bad back-reference in request. + BadBackReference, /// Not a server. NotServer, /// Unsupported protocol version. @@ -78,6 +80,7 @@ impl Error { Error::WrongNetwork => Punishment::Disable, Error::UnknownPeer => Punishment::Disconnect, Error::UnsolicitedResponse => Punishment::Disable, + Error::BadBackReference => Punishment::Disable, Error::NotServer => Punishment::Disable, Error::UnsupportedProtocolVersion(_) => Punishment::Disable, Error::BadProtocolVersion => Punishment::Disable, @@ -109,6 +112,7 @@ impl fmt::Display for Error { Error::WrongNetwork => write!(f, "Wrong network"), Error::UnknownPeer => write!(f, "Unknown peer"), Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), + Error::BadBackReference => write!(f, "Bad back-reference in request."), Error::NotServer => write!(f, "Peer not a server."), Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 1b2433fbe..8363fcfe7 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -642,13 +642,13 @@ impl LightProtocol { Ok(()) } + // Receive requests from a peer. 
fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { // the maximum amount of requests we'll fill in a single packet. - const MAX_REQUESTS: usize = 512; - // the maximum amount of gas we'll prove execution of in a single packet. - const MAX_GAS: usize = 50_000_000; + const MAX_REQUESTS: usize = 256; use ::request_builder::RequestBuilder; + use ::request::CompleteRequest; let peers = self.peers.read(); let peer = match peers.get(peer) { @@ -661,8 +661,37 @@ impl LightProtocol { let mut peer = peer.lock(); let req_id: u64 = raw.val_at(0)?; + let mut cumulative_cost = U256::from(0); + let cur_buffer = peer.local_credits.current(); - unimplemented!() + let mut request_builder = RequestBuilder::default(); + + // deserialize requests, check costs and back-references. + for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { + let request: Request = request_rlp.as_val()?; + cumulative_cost = cumulative_cost + self.flow_params.compute_cost(&request); + if cumulative_cost > cur_buffer { return Err(Error::NoCredits) } + request_builder.push(request).map_err(|_| Error::BadBackReference)?; + } + + let requests = request_builder.build(); + + // respond to all requests until one fails. 
+ let responses = requests.respond_to_all(|complete_req| { + match complete_req { + CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), + CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), + CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), + CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts), + CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account), + CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage), + CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), + CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution), + } + }); + + io.respond(packet::RESPONSE, ::rlp::encode(&responses).to_vec()); + Ok(()) } fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { From ee034185a55858da6958c0c01839c45a5c3ec8d1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 15:28:46 +0100 Subject: [PATCH 21/91] handle response packets --- ethcore/light/src/net/mod.rs | 40 +++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 8363fcfe7..b6f514371 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -167,7 +167,9 @@ pub trait Handler: Send + Sync { /// Called when a peer requests relay of some transactions. fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } /// Called when a peer responds to requests. - fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _relay: &[Response]) { } + /// Responses not guaranteed to contain valid data and are not yet checked against + /// the requests they correspond to. 
+ fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { } /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. @@ -380,11 +382,11 @@ impl LightProtocol { // - check whether peer exists // - check whether request was made // - check whether request kinds match - fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { + fn pre_verify_response(&self, peer: &PeerId, raw: &UntrustedRlp) -> Result { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response from peer {}, kind={:?}", peer, kind); + trace!(target: "pip", "pre-verifying response from peer {}", peer); let peers = self.peers.read(); let res = match peers.get(peer) { @@ -394,7 +396,7 @@ impl LightProtocol { let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { - (Some(request), Some(flow_info)) => { + (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); @@ -662,15 +664,13 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; let mut cumulative_cost = U256::from(0); - let cur_buffer = peer.local_credits.current(); let mut request_builder = RequestBuilder::default(); - // deserialize requests, check costs and back-references. + // deserialize requests, check costs and request validity. 
for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; - cumulative_cost = cumulative_cost + self.flow_params.compute_cost(&request); - if cumulative_cost > cur_buffer { return Err(Error::NoCredits) } + peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; request_builder.push(request).map_err(|_| Error::BadBackReference)?; } @@ -690,12 +690,32 @@ impl LightProtocol { } }); - io.respond(packet::RESPONSE, ::rlp::encode(&responses).to_vec()); + io.respond(packet::RESPONSE, { + let mut stream = RlpStream::new_list(3); + let cur_credits = peer.local_credits.current(); + stream.append(&req_id).append(&cur_credits).append(&responses); + stream.out() + }); Ok(()) } + // handle a packet with responses. fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + let (req_id, responses) = { + let id_guard = self.pre_verify_response(peer, &raw)?; + let responses: Vec = raw.val_at(2)?; + (id_guard.defuse(), responses) + }; + + for handler in &self.handlers { + handler.on_responses(&Ctx { + io: io, + proto: self, + peer: *peer, + }, req_id, &responses); + } + + Ok(()) } // Receive a set of transactions to relay. 
From bb39f104f46f3d79b85f197e2cd6f4c31b541fc1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 17:37:07 +0100 Subject: [PATCH 22/91] implement requesting from --- ethcore/light/src/net/context.rs | 16 ++++-- ethcore/light/src/net/mod.rs | 75 +++++++++++++++++----------- ethcore/light/src/net/request_set.rs | 30 ++++++++--- ethcore/light/src/provider.rs | 14 +++--- ethcore/light/src/request_builder.rs | 6 +-- ethcore/light/src/types/request.rs | 7 +-- 6 files changed, 92 insertions(+), 56 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 80a829962..659c117af 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -21,6 +21,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; use request::{self, Request}; +use request_builder::Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions @@ -83,7 +84,12 @@ pub trait BasicContext { fn persistent_peer_id(&self, peer: PeerId) -> Option; /// Make a request from a peer. - fn request_from(&self, peer: PeerId, request: Request) -> Result; + /// + /// Fails on: nonexistent peer, network error, peer not server, + /// insufficient credits. Does not check capabilities before sending. + /// On success, returns a request id which can later be coordinated + /// with an event. + fn request_from(&self, peer: PeerId, request: Requests) -> Result; /// Make an announcement of new capabilities to the rest of the peers. // TODO: maybe just put this on a timer in LightProtocol? 
@@ -119,8 +125,8 @@ impl<'a> BasicContext for TickCtx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { @@ -152,8 +158,8 @@ impl<'a> BasicContext for Ctx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index b6f514371..57459ec01 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -35,6 +35,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; use request::{self, HashOrNumber, Request, Response}; +use request_builder::Requests; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -71,8 +72,8 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; /// Max protocol version. pub const MAX_PROTOCOL_VERSION: u8 = 1; -/// Packet count for LES. -pub const PACKET_COUNT: u8 = 17; +/// Packet count for PIP. +pub const PACKET_COUNT: u8 = 5; // packet ID definitions. mod packet { @@ -88,24 +89,21 @@ mod packet { // relay transactions to peers. pub const SEND_TRANSACTIONS: u8 = 0x04; - - // request and response for transaction proof. - // TODO: merge with request/response. - pub const GET_TRANSACTION_PROOF: u8 = 0x05; - pub const TRANSACTION_PROOF: u8 = 0x06; } // timeouts for different kinds of requests. all values are in milliseconds. -// TODO: variable timeouts based on request count. 
mod timeout { pub const HANDSHAKE: i64 = 2500; - pub const HEADERS: i64 = 2500; - pub const BODIES: i64 = 5000; - pub const RECEIPTS: i64 = 3500; - pub const PROOFS: i64 = 4000; - pub const CONTRACT_CODES: i64 = 5000; - pub const HEADER_PROOFS: i64 = 3500; - pub const TRANSACTION_PROOF: i64 = 5000; + pub const BASE: i64 = 1500; // base timeout for packet. + + // timeouts per request within packet. + pub const HEADERS: i64 = 250; // per header? + pub const BODY: i64 = 50; + pub const RECEIPT: i64 = 50; + pub const PROOF: i64 = 100; // state proof + pub const CONTRACT_CODE: i64 = 100; + pub const HEADER_PROOF: i64 = 100; + pub const TRANSACTION_PROOF: i64 = 1000; // per gas? } /// A request id. @@ -138,16 +136,7 @@ pub struct Peer { failed_requests: Vec, } -impl Peer { - // refund credits for a request. returns new amount of credits. - fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { - flow_params.refund(&mut self.local_credits, amount); - - self.local_credits.current() - } -} - -/// An LES event handler. +/// A light protocol event handler. /// /// Each handler function takes a context which describes the relevant peer /// and gives references to the IO layer and protocol structure so new messages @@ -304,9 +293,37 @@ impl LightProtocol { /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. - // TODO: pass `Requests`. 
- pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { - unimplemented!() + pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, requests: Requests) -> Result { + let peers = self.peers.read(); + let peer = match peers.get(peer_id) { + Some(peer) => peer, + None => return Err(Error::UnknownPeer), + }; + + let mut peer = peer.lock(); + let peer = &mut *peer; + match peer.remote_flow { + None => Err(Error::NotServer), + Some((ref mut creds, ref params)) => { + // check that enough credits are available. + let mut temp_creds: Credits = creds.clone(); + for request in requests.requests() { + temp_creds.deduct_cost(params.compute_cost(request))?; + } + *creds = temp_creds; + + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); + io.send(*peer_id, packet::REQUEST, { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id.0).append(&requests.requests()); + stream.out() + }); + + // begin timeout. + peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + Ok(req_id) + } + } } /// Make an announcement of new chain head and capabilities to all peers. @@ -663,8 +680,6 @@ impl LightProtocol { let mut peer = peer.lock(); let req_id: u64 = raw.val_at(0)?; - let mut cumulative_cost = U256::from(0); - let mut request_builder = RequestBuilder::default(); // deserialize requests, check costs and request validity. 
diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index c329d780f..eefc6dfd5 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -25,6 +25,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; use request::{self, Request}; +use request_builder::Requests; use net::{timeout, ReqId}; use time::{Duration, SteadyTime}; @@ -35,7 +36,7 @@ pub struct RequestSet { counter: u64, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { @@ -50,8 +51,8 @@ impl Default for RequestSet { } impl RequestSet { - /// Push a request onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Request, now: SteadyTime) { + /// Push requests onto the stack. + pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { let counter = self.counter; self.ids.insert(req_id, counter); self.reqs.insert(counter, req); @@ -63,8 +64,8 @@ impl RequestSet { self.counter += 1; } - /// Remove a request from the stack. - pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { + /// Remove a set of requests from the stack. + pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { let id = match self.ids.remove(&req_id) { Some(id) => id, None => return None, @@ -89,7 +90,24 @@ impl RequestSet { None => return false, }; - unimplemented!() + let first_req = self.reqs.values().next() + .expect("base existing implies `reqs` non-empty; qed"); + + // timeout is a base + value per request contained within. 
+ let timeout = first_req.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + } + }); + + base + Duration::milliseconds(timeout) <= now } /// Collect all pending request ids. diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index be9239e4d..f6ffded82 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,7 +24,7 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{Bytes, DBValue, RwLock, H256}; +use util::{RwLock, H256}; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; @@ -297,27 +297,27 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } - fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { None } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + fn block_receipts(&self, _req: request::CompleteReceiptsRequest) -> Option { None } - fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + fn account_proof(&self, _req: request::CompleteAccountRequest) -> Option { None } - fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + fn storage_proof(&self, _req: request::CompleteStorageRequest) -> Option { None } - fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option { None } - fn header_proof(&self, req: 
request::CompleteHeaderProofRequest) -> Option { + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { None } diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs index 6233075bb..3533026a5 100644 --- a/ethcore/light/src/request_builder.rs +++ b/ethcore/light/src/request_builder.rs @@ -18,10 +18,10 @@ //! Push requests with `push`. Back-references and data required to verify responses must be //! supplied as well. -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; use request::{ IncompleteRequest, CompleteRequest, Request, - Field, OutputKind, Output, NoSuchOutput, Response, + OutputKind, Output, NoSuchOutput, Response, }; /// Build chained requests. Push them onto the series with `push`, @@ -72,7 +72,7 @@ impl Requests { /// For each request, produce responses for each. /// The responses vector produced goes up to the point where the responder /// first returns `None`, an invalid response, or until all requests have been responded to. - pub fn respond_to_all(mut self, responder: F) -> Vec + pub fn respond_to_all(self, responder: F) -> Vec where F: Fn(CompleteRequest) -> Option { let mut responses = Vec::new(); diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index a84a37435..d6893b0e1 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -16,11 +16,8 @@ //! Light protocol request types. -use std::collections::HashMap; - -use ethcore::transaction::Action; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; -use util::{Address, H256, U256, Uint}; +use util::H256; // re-exports of request types. pub use self::header::{ @@ -391,7 +388,7 @@ pub enum Response { impl Response { /// Fill reusable outputs by writing them into the function. 
- pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + pub fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { match *self { Response::Headers(ref res) => res.fill_outputs(f), Response::HeaderProof(ref res) => res.fill_outputs(f), From 969261695831236b6eddc30b2af28e9714e0d2a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:01:41 +0100 Subject: [PATCH 23/91] re-do cost table --- ethcore/light/src/client/mod.rs | 2 +- ethcore/light/src/net/context.rs | 1 - ethcore/light/src/net/mod.rs | 11 +++-- ethcore/light/src/net/request_credits.rs | 55 ++++++++++++++++++++++-- ethcore/light/src/net/request_set.rs | 2 +- 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 34f7ed990..c791caed1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{Bytes, DBValue, H256, Mutex, RwLock}; +use util::{H256, Mutex, RwLock}; use self::header_chain::{AncestryIter, HeaderChain}; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 659c117af..513388a92 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -20,7 +20,6 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request::{self, Request}; use request_builder::Requests; /// An I/O context which allows sending and receiving packets as well as diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 57459ec01..0241cf3f1 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -18,14 +18,13 @@ //! //! This uses a "Provider" to answer requests. 
-use ethcore::transaction::{Action, UnverifiedTransaction}; -use ethcore::receipt::Receipt; +use ethcore::transaction::UnverifiedTransaction; use io::TimerToken; use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, Stream, UntrustedRlp, View}; use util::hash::H256; -use util::{Bytes, DBValue, Mutex, RwLock, U256}; +use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; use std::collections::HashMap; @@ -34,7 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{self, HashOrNumber, Request, Response}; +use request::{Request, Response}; use request_builder::Requests; use self::request_credits::{Credits, FlowParams}; @@ -48,8 +47,8 @@ mod error; mod status; mod request_set; -#[cfg(test)] -mod tests; +// #[cfg(test)] +// mod tests; pub mod request_credits; diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index e3821e05a..abeb7e569 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -27,7 +27,6 @@ //! on any empirical timings or mathematical models. 
use request::{self, Request}; -use super::packet; use super::error::Error; use rlp::*; @@ -107,13 +106,63 @@ impl Default for CostTable { impl RlpEncodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - unimplemented!() + fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { + s.begin_list(2).append(&kind).append(cost); + } + + s.begin_list(9).append(&self.base); + append_cost(s, &self.headers, request::Kind::Headers); + append_cost(s, &self.body, request::Kind::Body); + append_cost(s, &self.receipts, request::Kind::Receipts); + append_cost(s, &self.account, request::Kind::Account); + append_cost(s, &self.storage, request::Kind::Storage); + append_cost(s, &self.code, request::Kind::Code); + append_cost(s, &self.header_proof, request::Kind::HeaderProof); + append_cost(s, &self.transaction_proof, request::Kind::Execution); } } impl RlpDecodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { - unimplemented!() + let rlp = decoder.as_rlp(); + let base = rlp.val_at(0)?; + + let mut headers = None; + let mut body = None; + let mut receipts = None; + let mut account = None; + let mut storage = None; + let mut code = None; + let mut header_proof = None; + let mut transaction_proof = None; + + for cost_list in rlp.iter().skip(1) { + let cost = cost_list.val_at(1)?; + match cost_list.val_at(0)? 
{ + request::Kind::Headers => headers = Some(cost), + request::Kind::Body => body = Some(cost), + request::Kind::Receipts => receipts = Some(cost), + request::Kind::Account => account = Some(cost), + request::Kind::Storage => storage = Some(cost), + request::Kind::Code => code = Some(cost), + request::Kind::HeaderProof => header_proof = Some(cost), + request::Kind::Execution => transaction_proof = Some(cost), + } + } + + let unwrap_cost = |cost: Option| cost.ok_or(DecoderError::Custom("Not all costs specified in cost table.")); + + Ok(CostTable { + base: base, + headers: unwrap_cost(headers)?, + body: unwrap_cost(body)?, + receipts: unwrap_cost(receipts)?, + account: unwrap_cost(account)?, + storage: unwrap_cost(storage)?, + code: unwrap_cost(code)?, + header_proof: unwrap_cost(header_proof)?, + transaction_proof: unwrap_cost(transaction_proof)?, + }) } } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index eefc6dfd5..8405b8c89 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -24,7 +24,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; -use request::{self, Request}; +use request::Request; use request_builder::Requests; use net::{timeout, ReqId}; From 9268a1f59cf134e20c0fbac5495404732aaa920e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:27:29 +0100 Subject: [PATCH 24/91] get tests compiling --- ethcore/light/src/net/request_set.rs | 44 +++++++++++++++------------- ethcore/light/src/provider.rs | 6 ++-- ethcore/light/src/types/request.rs | 2 +- 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index 8405b8c89..f66b44f6e 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -93,21 +93,7 @@ impl RequestSet { let first_req = self.reqs.values().next() .expect("base existing implies `reqs` non-empty; 
qed"); - // timeout is a base + value per request contained within. - let timeout = first_req.requests().iter().fold(timeout::BASE, |tm, req| { - tm + match *req { - Request::Headers(_) => timeout::HEADERS, - Request::HeaderProof(_) => timeout::HEADER_PROOF, - Request::Receipts(_) => timeout::RECEIPT, - Request::Body(_) => timeout::BODY, - Request::Account(_) => timeout::PROOF, - Request::Storage(_) => timeout::PROOF, - Request::Code(_) => timeout::CONTRACT_CODE, - Request::Execution(_) => timeout::TRANSACTION_PROOF, - } - }); - - base + Duration::milliseconds(timeout) <= now + base + compute_timeout(&first_req) <= now } /// Collect all pending request ids. @@ -124,25 +110,43 @@ impl RequestSet { pub fn is_empty(&self) -> bool { self.len() == 0 } } +// helper to calculate timeout for a specific set of requests. +// it's a base amount + some amount per request. +fn compute_timeout(reqs: &Requests) -> Duration { + Duration::milliseconds(reqs.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + } + })) +} + #[cfg(test)] mod tests { - use net::{timeout, ReqId}; - use request::{Request, Receipts}; + use net::ReqId; + use request_builder::RequestBuilder; use time::{SteadyTime, Duration}; - use super::RequestSet; + use super::{RequestSet, compute_timeout}; #[test] fn multi_timeout() { let test_begin = SteadyTime::now(); let mut req_set = RequestSet::default(); - let the_req = Request::Receipts(Receipts { block_hashes: Vec::new() }); + let the_req = RequestBuilder::default().build(); + let req_time = compute_timeout(&the_req); req_set.insert(ReqId(0), the_req.clone(), test_begin); 
req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); - let test_end = test_begin + Duration::milliseconds(timeout::RECEIPTS); + let test_end = test_begin + req_time; assert!(req_set.check_timeout(test_end)); req_set.remove(&ReqId(0), test_begin + Duration::seconds(1)).unwrap(); diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index f6ffded82..36653fe4d 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -349,10 +349,8 @@ mod tests { let client = TestBlockChainClient::new(); client.add_blocks(2000, EachBlockWith::Nothing); - let req = ::request::HeaderProof { - cht_number: 0, - block_number: 1500, - from_level: 0, + let req = ::request::CompleteHeaderProofRequest { + num: 1500, }; assert!(client.header_proof(req.clone()).is_none()); diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index d6893b0e1..42aab4e20 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -346,7 +346,7 @@ impl Decodable for Kind { match rlp.as_val::()? 
{ 0 => Ok(Kind::Headers), 1 => Ok(Kind::HeaderProof), - // 2 => Ok(Kind::TransactionIndex, + // 2 => Ok(Kind::TransactionIndex), 3 => Ok(Kind::Receipts), 4 => Ok(Kind::Body), 5 => Ok(Kind::Account), From a1186727af700a879df17850abb46ec0f4d86e65 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:38:25 +0100 Subject: [PATCH 25/91] fix cost table RLP encoding --- ethcore/light/src/net/request_credits.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index abeb7e569..4f7f8a6a3 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -104,10 +104,14 @@ impl Default for CostTable { } } -impl RlpEncodable for CostTable { +impl Encodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { - s.begin_list(2).append(&kind).append(cost); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&kind, s); + s.append(cost); } s.begin_list(9).append(&self.base); @@ -122,7 +126,7 @@ impl RlpEncodable for CostTable { } } -impl RlpDecodable for CostTable { +impl Decodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); let base = rlp.val_at(0)?; From d9087dd2b6e55d9fc2ebb5ea1470a045fa869108 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 19:50:26 +0100 Subject: [PATCH 26/91] roundtrip tests for request types --- ethcore/light/src/types/request.rs | 237 +++++++++++++++++++++++++++-- 1 file changed, 223 insertions(+), 14 deletions(-) diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 42aab4e20..880bb99f2 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -100,10 +100,13 @@ impl Encodable for Field { fn rlp_append(&self, s: &mut 
RlpStream) { s.begin_list(2); match *self { - Field::Scalar(ref data) => s.append(&0u8).append(data), - Field::BackReference(ref req, ref idx) => - s.append(&1u8).begin_list(2).append(req).append(idx), - }; + Field::Scalar(ref data) => { + s.append(&0u8).append(data); + } + Field::BackReference(ref req, ref idx) => { + s.append(&1u8).begin_list(2).append(req).append(idx); + } + } } } @@ -161,8 +164,8 @@ impl Decodable for HashOrNumber { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); - rlp.val_at::(0).map(HashOrNumber::Hash) - .or_else(|_| rlp.val_at(0).map(HashOrNumber::Number)) + rlp.as_val::().map(HashOrNumber::Hash) + .or_else(|_| rlp.as_val().map(HashOrNumber::Number)) } } @@ -582,7 +585,7 @@ pub mod header { let mut headers = Vec::new(); - for item in rlp.at(0)?.iter() { + for item in rlp.iter() { // check that it's a valid encoding. // TODO: just return full headers here? let _: FullHeader = item.as_val()?; @@ -798,7 +801,7 @@ pub mod block_receipts { let rlp = decoder.as_rlp(); Ok(Response { - receipts: rlp.val_at(0)?, + receipts: rlp.as_val()?, }) } } @@ -896,22 +899,20 @@ pub mod block_body { use ethcore::transaction::UnverifiedTransaction; let rlp = decoder.as_rlp(); - let body_rlp = rlp.at(0)?; // check body validity. 
let _: Vec = rlp.val_at(0)?; let _: Vec = rlp.val_at(1)?; Ok(Response { - body: encoded::Body::new(body_rlp.as_raw().to_owned()), + body: encoded::Body::new(rlp.as_raw().to_owned()), }) } } impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2) - .append_raw(&self.body.rlp().as_raw(), 2); + s.append_raw(&self.body.rlp().as_raw(), 2); } } } @@ -1305,7 +1306,7 @@ pub mod contract_code { let rlp = decoder.as_rlp(); Ok(Response { - code: rlp.val_at(0)?, + code: rlp.as_val()?, }) } } @@ -1450,7 +1451,7 @@ pub mod execution { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); let mut items = Vec::new(); - for raw_item in rlp.at(0)?.iter() { + for raw_item in rlp.iter() { let mut item = DBValue::new(); item.append_slice(raw_item.data()?); items.push(item); @@ -1472,3 +1473,211 @@ pub mod execution { } } } + +#[cfg(test)] +mod tests { + use super::*; + use ethcore::header::Header; + + fn check_roundtrip(val: T) + where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug + { + let bytes = ::rlp::encode(&val); + let new_val: T = ::rlp::decode(&bytes); + assert_eq!(val, new_val); + } + + #[test] + fn hash_or_number_roundtrip() { + let hash = HashOrNumber::Hash(H256::default()); + let number = HashOrNumber::Number(5); + + check_roundtrip(hash); + check_roundtrip(number); + } + + #[test] + fn field_roundtrip() { + let field_scalar = Field::Scalar(5usize); + let field_back: Field = Field::BackReference(1, 2); + + check_roundtrip(field_scalar); + check_roundtrip(field_back); + } + + #[test] + fn headers_roundtrip() { + let req = IncompleteHeadersRequest { + start: Field::Scalar(5u64.into()), + skip: 0, + max: 100, + reverse: false, + }; + + let full_req = Request::Headers(req.clone()); + let res = HeadersResponse { + headers: vec![ + ::ethcore::encoded::Header::new(::rlp::encode(&Header::default()).to_vec()) + ] + }; + let full_res = Response::Headers(res.clone()); + + check_roundtrip(req); + 
check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn header_proof_roundtrip() { + let req = IncompleteHeaderProofRequest { + num: Field::BackReference(1, 234), + }; + + let full_req = Request::HeaderProof(req.clone()); + let res = HeaderProofResponse { + proof: Vec::new(), + hash: Default::default(), + td: 100.into(), + }; + let full_res = Response::HeaderProof(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn receipts_roundtrip() { + let req = IncompleteReceiptsRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Receipts(req.clone()); + let res = ReceiptsResponse { + receipts: vec![Default::default(), Default::default()], + }; + let full_res = Response::Receipts(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn body_roundtrip() { + let req = IncompleteBodyRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Body(req.clone()); + let res = BodyResponse { + body: { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + ::ethcore::encoded::Body::new(stream.out()) + }, + }; + let full_res = Response::Body(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn account_roundtrip() { + let req = IncompleteAccountRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + }; + + let full_req = Request::Account(req.clone()); + let res = AccountResponse { + proof: Vec::new(), + nonce: 100.into(), + balance: 123456.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }; + let full_res = Response::Account(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + 
check_roundtrip(full_res); + } + + #[test] + fn storage_roundtrip() { + let req = IncompleteStorageRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + key_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Storage(req.clone()); + let res = StorageResponse { + proof: Vec::new(), + value: H256::default(), + }; + let full_res = Response::Storage(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn code_roundtrip() { + let req = IncompleteCodeRequest { + block_hash: Field::Scalar(Default::default()), + code_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Code(req.clone()); + let res = CodeResponse { + code: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Code(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn execution_roundtrip() { + use util::DBValue; + + let req = IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }; + + let full_req = Request::Execution(req.clone()); + let res = ExecutionResponse { + items: vec![DBValue::new(), { + let mut value = DBValue::new(); + value.append_slice(&[1, 1, 1, 2, 3]); + value + }], + }; + let full_res = Response::Execution(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } +} From aea9b1d6ccdb5a093094ba0c6f1b0aa188001da1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 20:07:45 +0100 Subject: [PATCH 27/91] request builder tests --- ethcore/light/src/request_builder.rs | 50 ++++++++++++++++++++++++++++ ethcore/light/src/types/request.rs | 4 +-- 
2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs index 3533026a5..867bb6dcc 100644 --- a/ethcore/light/src/request_builder.rs +++ b/ethcore/light/src/request_builder.rs @@ -114,3 +114,53 @@ impl Requests { /// Get access to the underlying slice of requests. pub fn requests(&self) -> &[Request] { &self.requests } } + +#[cfg(test)] +mod tests { + use request::*; + use super::RequestBuilder; + use util::H256; + + #[test] + fn all_scalar() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: H256::default().into(), + })).unwrap(); + } + + #[test] + #[should_panic] + fn missing_backref() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(100, 3), + })).unwrap(); + } + + #[test] + #[should_panic] + fn wrong_kind() { + let mut builder = RequestBuilder::default(); + assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).is_ok()); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(0, 0), + })).unwrap(); + } + + #[test] + fn good_backreference() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. 
+ })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: Field::BackReference(0, 0), + })).unwrap(); + } +} diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 880bb99f2..1a1276951 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -649,7 +649,7 @@ pub mod header_proof { } fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { - note(1, OutputKind::Hash); + note(0, OutputKind::Hash); } fn fill(self, oracle: F) -> Result @@ -691,7 +691,7 @@ pub mod header_proof { impl Response { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(1, Output::Hash(self.hash)); + f(0, Output::Hash(self.hash)); } } From 8fb0a2d417d82eefd9f5b8358088ea72d5c0bed4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 20:11:22 +0100 Subject: [PATCH 28/91] move request_builder -> request::builder --- ethcore/light/src/lib.rs | 1 - ethcore/light/src/net/context.rs | 2 +- ethcore/light/src/net/mod.rs | 5 ++--- ethcore/light/src/net/request_set.rs | 4 ++-- .../src/{request_builder.rs => types/request/builder.rs} | 0 ethcore/light/src/types/{request.rs => request/mod.rs} | 4 ++++ 6 files changed, 9 insertions(+), 7 deletions(-) rename ethcore/light/src/{request_builder.rs => types/request/builder.rs} (100%) rename ethcore/light/src/types/{request.rs => request/mod.rs} (99%) diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index b15c85242..81a974192 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -38,7 +38,6 @@ pub mod net; //pub mod on_demand; pub mod transaction_queue; pub mod cache; -pub mod request_builder; #[cfg(not(feature = "ipc"))] pub mod provider; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 513388a92..9eafead57 100644 --- a/ethcore/light/src/net/context.rs +++ 
b/ethcore/light/src/net/context.rs @@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request_builder::Requests; +use request::Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 0241cf3f1..7929f7b43 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -33,8 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{Request, Response}; -use request_builder::Requests; +use request::{Request, Requests, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -665,7 +664,7 @@ impl LightProtocol { // the maximum amount of requests we'll fill in a single packet. const MAX_REQUESTS: usize = 256; - use ::request_builder::RequestBuilder; + use ::request::RequestBuilder; use ::request::CompleteRequest; let peers = self.peers.read(); diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index f66b44f6e..a2391ef6f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -25,7 +25,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; use request::Request; -use request_builder::Requests; +use request::Requests; use net::{timeout, ReqId}; use time::{Duration, SteadyTime}; @@ -130,7 +130,7 @@ fn compute_timeout(reqs: &Requests) -> Duration { #[cfg(test)] mod tests { use net::ReqId; - use request_builder::RequestBuilder; + use request::RequestBuilder; use time::{SteadyTime, Duration}; use super::{RequestSet, compute_timeout}; diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/types/request/builder.rs similarity index 100% rename from ethcore/light/src/request_builder.rs 
rename to ethcore/light/src/types/request/builder.rs diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request/mod.rs similarity index 99% rename from ethcore/light/src/types/request.rs rename to ethcore/light/src/types/request/mod.rs index 1a1276951..383e1a06a 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -19,6 +19,8 @@ use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::H256; +mod builder; + // re-exports of request types. pub use self::header::{ Complete as CompleteHeadersRequest, @@ -61,6 +63,8 @@ pub use self::execution::{ Response as ExecutionResponse, }; +pub use self::builder::{RequestBuilder, Requests}; + /// Error indicating a reference to a non-existent or wrongly-typed output. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; From 391eb4b66c01634ca4665366419c4296aa048225 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Mar 2017 16:55:13 +0100 Subject: [PATCH 29/91] get network tests working --- ethcore/light/src/net/mod.rs | 15 +- ethcore/light/src/net/request_credits.rs | 9 + ethcore/light/src/net/tests/mod.rs | 309 +++++++++++++---------- ethcore/light/src/types/request/mod.rs | 35 ++- 4 files changed, 224 insertions(+), 144 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 7929f7b43..402f3ac3a 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -46,8 +46,8 @@ mod error; mod status; mod request_set; -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; pub mod request_credits; @@ -660,7 +660,7 @@ impl LightProtocol { } // Receive requests from a peer. - fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + fn request(&self, peer_id: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { // the maximum amount of requests we'll fill in a single packet. 
const MAX_REQUESTS: usize = 256; @@ -668,7 +668,7 @@ impl LightProtocol { use ::request::CompleteRequest; let peers = self.peers.read(); - let peer = match peers.get(peer) { + let peer = match peers.get(peer_id) { Some(peer) => peer, None => { debug!(target: "pip", "Ignoring request from unknown peer"); @@ -680,7 +680,10 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; let mut request_builder = RequestBuilder::default(); + trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); + // deserialize requests, check costs and request validity. + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; @@ -688,6 +691,8 @@ impl LightProtocol { } let requests = request_builder.build(); + let num_requests = requests.requests().len(); + trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id); // respond to all requests until one fails. let responses = requests.respond_to_all(|complete_req| { @@ -703,6 +708,8 @@ impl LightProtocol { } }); + trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + io.respond(packet::RESPONSE, { let mut stream = RlpStream::new_list(3); let cur_credits = peer.local_credits.current(); diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 4f7f8a6a3..29f0fff95 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -215,6 +215,9 @@ impl FlowParams { /// Get a reference to the cost table. pub fn cost_table(&self) -> &CostTable { &self.costs } + /// Get the base cost of a request. + pub fn base_cost(&self) -> U256 { self.costs.base } + /// Get a reference to the recharge rate. 
pub fn recharge_rate(&self) -> &U256 { &self.recharge } @@ -233,6 +236,12 @@ impl FlowParams { } } + /// Compute the cost of a set of requests. + /// This is the base cost plus the cost of each individual request. + pub fn compute_cost_multi(&self, requests: &[Request]) -> U256 { + requests.iter().fold(self.costs.base, |cost, req| cost + self.compute_cost(req)) + } + /// Create initial credits. pub fn create_credits(&self) -> Credits { Credits { diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 6a9de1467..bc7ab2e10 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -27,15 +27,31 @@ use network::{PeerId, NodeId}; use net::request_credits::FlowParams; use net::context::IoContext; use net::status::{Capabilities, Status, write_handshake}; -use net::{encode_request, LightProtocol, Params, packet, Peer}; +use net::{LightProtocol, Params, packet, Peer}; use provider::Provider; -use request::{self, Request, Headers}; +use request; +use request::*; use rlp::*; -use util::{Address, Bytes, DBValue, H256, U256}; +use util::{Address, H256, U256}; use std::sync::Arc; +// helper for encoding a single request into a packet. +// panics on bad backreference. +fn encode_single(request: Request) -> Requests { + let mut builder = RequestBuilder::default(); + builder.push(request).unwrap(); + builder.build() +} + +// helper for making a packet out of `Requests`. +fn make_packet(req_id: usize, requests: &Requests) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).append(&requests.requests()); + stream.out() +} + // expected result from a call. 
#[derive(Debug, PartialEq, Eq)] enum Expect { @@ -99,35 +115,45 @@ impl Provider for TestProvider { self.0.client.block_header(id) } - fn block_body(&self, id: BlockId) -> Option { - self.0.client.block_body(id) + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + self.0.client.block_body(req) } - fn block_receipts(&self, hash: &H256) -> Option { - self.0.client.block_receipts(&hash) + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + self.0.client.block_receipts(req) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(_) => vec![::util::sha3::SHA3_NULL_RLP.to_vec()], - None => { - // sort of a leaf node - let mut stream = RlpStream::new_list(2); - stream.append(&req.key1).append_empty_data(); - vec![stream.out()] - } - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + // sort of a leaf node + let mut stream = RlpStream::new_list(2); + stream.append(&req.address_hash).append_empty_data(); + Some(AccountResponse { + proof: vec![stream.out()], + balance: 10.into(), + nonce: 100.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - req.account_key.iter().chain(req.account_key.iter()).cloned().collect() + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + Some(StorageResponse { + proof: vec![::rlp::encode(&req.key_hash).to_vec()], + value: req.key_hash | req.address_hash, + }) } - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + Some(CodeResponse { + code: req.block_hash.iter().chain(req.code_hash.iter()).cloned().collect(), + }) + } + + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, 
_req: request::CompleteExecutionRequest) -> Option { None } @@ -226,14 +252,15 @@ fn credit_overflow() { } // 1000 requests is far too many for the default flow params. - let request = encode_request(&Request::Headers(Headers { - start: 1.into(), + let requests = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 1000, skip: 0, reverse: false, - }), 111); + })); + let request = make_packet(111, &requests); - proto.handle_packet(&Expect::Punish(1), &1, packet::GET_BLOCK_HEADERS, &request); + proto.handle_packet(&Expect::Punish(1), &1, packet::REQUEST, &request); } // test the basic request types -- these just make sure that requests are parsed @@ -259,33 +286,36 @@ fn get_block_headers() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = Headers { - start: 1.into(), + let request = Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 10, skip: 0, reverse: false, - }; + }); + let req_id = 111; - let request_body = encode_request(&Request::Headers(request.clone()), req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); - let mut response_stream = RlpStream::new_list(3); + let response = vec![Response::Headers(HeadersResponse { + headers: headers, + })]; - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for header in headers { - response_stream.append_raw(&header.into_inner(), 1); - } + let mut stream = RlpStream::new_list(3); + stream.append(&req_id).append(&new_creds).append(&response); - response_stream.out() + 
stream.out() }; - let expected = Expect::Respond(packet::BLOCK_HEADERS, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_HEADERS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -308,33 +338,32 @@ fn get_block_bodies() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = request::Bodies { - block_hashes: (0..10).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).collect() - }; + let mut builder = RequestBuilder::default(); + let mut bodies = Vec::new(); + for i in 0..10 { + let hash = provider.client.block_header(BlockId::Number(i)).unwrap().hash(); + builder.push(Request::Body(IncompleteBodyRequest { + hash: hash.into(), + })).unwrap(); + bodies.push(Response::Body(provider.client.block_body(CompleteBodyRequest { + hash: hash, + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Bodies(request.clone()), req_id); let response = { - let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect(); - assert_eq!(bodies.len(), 10); - - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for body in bodies { - response_stream.append_raw(&body.into_inner(), 1); - } - + response_stream.append(&req_id).append(&new_creds).append(&bodies); response_stream.out() }; - let expected = Expect::Respond(packet::BLOCK_BODIES, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_BODIES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + 
proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -359,36 +388,37 @@ fn get_block_receipts() { // find the first 10 block hashes starting with `f` because receipts are only provided // by the test client in that case. - let block_hashes: Vec<_> = (0..1000).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).filter(|hash| format!("{}", hash).starts_with("f")).take(10).collect(); + let block_hashes: Vec = (0..1000) + .map(|i| provider.client.block_header(BlockId::Number(i)).unwrap().hash()) + .filter(|hash| format!("{}", hash).starts_with("f")) + .take(10) + .collect(); - let request = request::Receipts { - block_hashes: block_hashes.clone(), - }; + let mut builder = RequestBuilder::default(); + let mut receipts = Vec::new(); + for hash in block_hashes.iter().cloned() { + builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap(); + receipts.push(Response::Receipts(provider.client.block_receipts(CompleteReceiptsRequest { + hash: hash + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Receipts(request.clone()), req_id); let response = { - let receipts: Vec<_> = block_hashes.iter() - .map(|hash| provider.client.block_receipts(hash).unwrap()) - .collect(); + assert_eq!(receipts.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len()); - for block_receipts in receipts { - response_stream.append_raw(&block_receipts, 1); - } - + response_stream.append(&req_id).append(&new_creds).append(&receipts); response_stream.out() }; - let expected = Expect::Respond(packet::RECEIPTS, response); 
- proto.handle_packet(&expected, &1, packet::GET_RECEIPTS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -397,8 +427,9 @@ fn get_state_proofs() { let capabilities = capabilities(); let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let provider = TestProvider(provider); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.0.client.chain_info()); { let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); @@ -407,40 +438,45 @@ fn get_state_proofs() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::StateProofs (request::StateProofs { - requests: vec![ - request::StateProof { block: H256::default(), key1: key1, key2: None, from_level: 0 }, - request::StateProof { block: H256::default(), key1: key1, key2: Some(key2), from_level: 0}, - ] - }); + let mut builder = RequestBuilder::default(); + builder.push(Request::Account(IncompleteAccountRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + })).unwrap(); + builder.push(Request::Storage(IncompleteStorageRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + key_hash: key2.into(), + })).unwrap(); - let request_body = encode_request(&request, req_id); + let requests = builder.build(); + + let request_body = make_packet(req_id, &requests); let response = { - let proofs = vec![ - { let mut stream = RlpStream::new_list(2); stream.append(&key1).append_empty_data(); vec![stream.out()] }, - vec![::util::sha3::SHA3_NULL_RLP.to_vec()], + let responses = vec![ + Response::Account(provider.account_proof(CompleteAccountRequest { + block_hash: H256::default(), + address_hash: key1, + }).unwrap()), + 
Response::Storage(provider.storage_proof(CompleteStorageRequest { + block_hash: H256::default(), + address_hash: key1, + key_hash: key2, + }).unwrap()), ]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for proof in proofs { - response_stream.begin_list(proof.len()); - for node in proof { - response_stream.append_raw(&node, 1); - } - } - + response_stream.append(&req_id).append(&new_creds).append(&responses); response_stream.out() }; - let expected = Expect::Respond(packet::PROOFS, response); - proto.handle_packet(&expected, &1, packet::GET_PROOFS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -459,37 +495,31 @@ fn get_contract_code() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::Codes (request::ContractCodes { - code_requests: vec![ - request::ContractCode { block_hash: H256::default(), account_key: key1 }, - request::ContractCode { block_hash: H256::default(), account_key: key2 }, - ], + let request = Request::Code(IncompleteCodeRequest { + block_hash: key1.into(), + code_hash: key2.into(), }); - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); let response = { - let codes: Vec> = vec![ - key1.iter().chain(key1.iter()).cloned().collect(), - key2.iter().chain(key2.iter()).cloned().collect(), - ]; + let response = vec![Response::Code(CodeResponse { + code: key1.iter().chain(key2.iter()).cloned().collect(), + 
})]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for code in codes { - response_stream.append(&code); - } - + response_stream.append(&req_id).append(&new_creds).append(&response); response_stream.out() }; - let expected = Expect::Respond(packet::CONTRACT_CODES, response); - proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -508,8 +538,8 @@ fn proof_of_execution() { } let req_id = 112; - let mut request = Request::TransactionProof (request::TransactionProof { - at: H256::default(), + let mut request = Request::Execution(request::IncompleteExecutionRequest { + block_hash: H256::default().into(), from: Address::default(), action: Action::Call(Address::default()), gas: 100.into(), @@ -519,9 +549,11 @@ fn proof_of_execution() { }); // first: a valid amount to request execution of. 
- let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).begin_list(0); @@ -529,17 +561,19 @@ fn proof_of_execution() { response_stream.out() }; - let expected = Expect::Respond(packet::TRANSACTION_PROOF, response); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); // next: way too much requested gas. - if let Request::TransactionProof(ref mut req) = request { + if let Request::Execution(ref mut req) = request { req.gas = 100_000_000.into(); } let req_id = 113; - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let expected = Expect::Punish(1); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -554,12 +588,13 @@ fn id_guard() { let req_id_1 = ReqId(5143); let req_id_2 = ReqId(1111); - let req = Request::Headers(request::Headers { - start: 5u64.into(), + + let req = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(5u64).into(), max: 100, skip: 0, reverse: false, - }); + })); let peer_id = 9876; @@ -579,15 +614,15 @@ fn id_guard() { failed_requests: Vec::new(), })); - // first, supply wrong request type. + // first, malformed responses. 
{ let mut stream = RlpStream::new_list(3); stream.append(&req_id_1.0); stream.append(&4_000_000usize); - stream.begin_list(0); + stream.begin_list(2).append(&125usize).append(&3usize); let packet = stream.out(); - assert!(proto.block_bodies(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // next, do an unexpected response. @@ -598,7 +633,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.receipts(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // lastly, do a valid (but empty) response. @@ -609,7 +644,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.block_headers(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); } let peers = proto.peers.read(); diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 383e1a06a..58a6ac717 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -260,7 +260,10 @@ impl Decodable for Request { impl Encodable for Request { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).append(&self.kind()); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); match *self { Request::Headers(ref req) => s.append(req), @@ -441,7 +444,10 @@ impl Decodable for Response { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).append(&self.kind()); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); match *self { Response::Headers(ref res) => s.append(res), @@ -916,7 +922,7 @@ pub mod 
block_body { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.append_raw(&self.body.rlp().as_raw(), 2); + s.append_raw(&self.body.rlp().as_raw(), 1); } } } @@ -1684,4 +1690,27 @@ mod tests { check_roundtrip(res); check_roundtrip(full_res); } + + #[test] + fn vec_test() { + use rlp::*; + + let reqs: Vec<_> = (0..10).map(|_| IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }).map(Request::Execution).collect(); + + let mut stream = RlpStream::new_list(2); + stream.append(&100usize).append(&reqs); + let out = stream.out(); + + let rlp = UntrustedRlp::new(&out); + assert_eq!(rlp.val_at::(0).unwrap(), 100usize); + assert_eq!(rlp.val_at::>(1).unwrap(), reqs); + } } From 64342d200c76634e65e461df1ed22ab5da418955 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Mar 2017 17:28:49 +0100 Subject: [PATCH 30/91] return only complete headers responses --- ethcore/light/src/provider.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 36653fe4d..7854330e4 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -62,7 +62,7 @@ pub trait Provider: Send + Sync { HashOrNumber::Number(start_num) => start_num, HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { None => { - trace!(target: "les_provider", "Unknown block hash {} requested", hash); + trace!(target: "pip_provider", "Unknown block hash {} requested", hash); return None; } Some(header) => { @@ -91,7 +91,11 @@ pub trait Provider: Send + Sync { .flat_map(|x| x) .collect(); - Some(::request::HeadersResponse { headers: headers }) + if headers.is_empty() { + None + } else { + Some(::request::HeadersResponse { headers: headers }) + } } /// Get 
a block header by id. @@ -182,7 +186,7 @@ impl Provider for T { let cht_number = match cht::block_to_cht_number(req.num) { Some(cht_num) => cht_num, None => { - debug!(target: "les_provider", "Requested CHT proof with invalid block number"); + debug!(target: "pip_provider", "Requested CHT proof with invalid block number"); return None; } }; @@ -230,7 +234,7 @@ impl Provider for T { }), Ok(None) => None, Err(e) => { - debug!(target: "les_provider", "Error looking up number in freshly-created CHT: {}", e); + debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e); None } } From f0a587d31081bb8d1098d8b2f8bb4f9bde060ab5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 13 Mar 2017 13:36:03 +0100 Subject: [PATCH 31/91] request builder improvements --- ethcore/light/src/provider.rs | 2 +- ethcore/light/src/types/request/builder.rs | 87 +++++++++++++--------- ethcore/light/src/types/request/mod.rs | 9 +++ 3 files changed, 63 insertions(+), 35 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 7854330e4..aa8869e20 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -82,7 +82,7 @@ pub trait Provider: Send + Sync { } }; - let headers = (0u64..req.max as u64) + let headers: Vec<_> = (0u64..req.max as u64) .map(|x: u64| x.saturating_mul(req.skip + 1)) .take_while(|x| if req.reverse { x < &start_num } else { best_num.saturating_sub(start_num) >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 867bb6dcc..cdd3a086f 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -21,7 +21,7 @@ use std::collections::HashMap; use request::{ IncompleteRequest, CompleteRequest, Request, - OutputKind, Output, NoSuchOutput, Response, + OutputKind, Output, NoSuchOutput, Response, ResponseError, }; /// 
Build chained requests. Push them onto the series with `push`, @@ -34,7 +34,7 @@ pub struct RequestBuilder { impl RequestBuilder { /// Attempt to push a request onto the request chain. Fails if the request - /// references a non-existant output of a prior request. + /// references a non-existent output of a prior request. pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { request.check_outputs(|req, idx, kind| { match self.output_kinds.get(&(req, idx)) { @@ -48,13 +48,17 @@ impl RequestBuilder { Ok(()) } + /// Get a reference to the output kinds map. + pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> { + &self.output_kinds + } + /// Convert this into a "requests" object. pub fn build(self) -> Requests { Requests { - output_kinds: self.output_kinds, outputs: HashMap::new(), requests: self.requests, - offset: 0, + answered: 0, } } } @@ -62,49 +66,27 @@ impl RequestBuilder { /// Requests pending responses. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Requests { - output_kinds: HashMap<(usize, usize), OutputKind>, outputs: HashMap<(usize, usize), Output>, requests: Vec, - offset: usize, // offset for splitting. + answered: usize, } impl Requests { /// For each request, produce responses for each. /// The responses vector produced goes up to the point where the responder /// first returns `None`, an invalid response, or until all requests have been responded to. 
- pub fn respond_to_all(self, responder: F) -> Vec + pub fn respond_to_all(mut self, responder: F) -> Vec where F: Fn(CompleteRequest) -> Option { let mut responses = Vec::new(); - let mut found_bad = false; - let offset = self.offset; - let output_kinds = self.output_kinds; - let mut outputs = self.outputs; - for (idx, req) in self.requests.into_iter().enumerate().map(|(idx, req)| (idx + offset, req)) { - let complete = req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) - .expect("All outputs checked as invariant of `Requests` object; qed"); - match responder(complete) { - Some(response) => { - response.fill_outputs(|out_idx, output| { - match output_kinds.get(&(idx, out_idx)) { - None => {}, - Some(out) => if out == &output.kind() { - outputs.insert((idx, out_idx), output); - } else { - // output kind doesn't match expected. - found_bad = true; - } - } - }); - - if found_bad { - return responses; - } - - responses.push(response); + while let Some(response) = self.next_complete().and_then(&responder) { + match self.supply_response(&response) { + Ok(()) => responses.push(response), + Err(e) => { + debug!(target: "pip", "produced bad response to request: {:?}", e); + return responses; } - None => return responses, } } @@ -112,7 +94,44 @@ impl Requests { } /// Get access to the underlying slice of requests. + // TODO: unimplemented -> Vec, // do we _have to_ allocate? pub fn requests(&self) -> &[Request] { &self.requests } + + /// Get the number of answered requests. + pub fn num_answered(&self) -> usize { self.answered } + + /// Get the next request as a filled request. Returns `None` when all requests answered. 
+ pub fn next_complete(&self) -> Option { + if self.answered == self.requests.len() { + None + } else { + let outputs = &self.outputs; + Some(self.requests[self.answered].clone() + .fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .expect("All outputs checked as invariant of `Requests` object; qed")) + } + } + + /// Supply a response for the next request. + /// Fails on: wrong request kind, all requests answered already. + pub fn supply_response(&mut self, response: &Response) -> Result<(), ResponseError> { + let idx = self.answered; + + // check validity. + if idx == self.requests.len() { return Err(ResponseError::Unexpected) } + if self.requests[idx].kind() != response.kind() { return Err(ResponseError::WrongKind) } + + let outputs = &mut self.outputs; + response.fill_outputs(|out_idx, output| { + // we don't need to check output kinds here because all back-references + // are validated in the builder. + // TODO: optimization for only storing outputs we "care about"? + outputs.insert((idx, out_idx), output); + }); + + self.answered += 1; + Ok(()) + } } #[cfg(test)] diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 58a6ac717..165dff742 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -69,6 +69,15 @@ pub use self::builder::{RequestBuilder, Requests}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; +/// Error on processing a response. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ResponseError { + /// Wrong kind of response. + WrongKind, + /// No responses expected. + Unexpected, +} + /// An input to a request. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Field { From 8bf5be0cc48ab12e0f9d9e2e924fcfda27aafe6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 13 Mar 2017 15:49:52 +0100 Subject: [PATCH 32/91] New version of jsonrpc. 
--- Cargo.lock | 132 ++++++++++----------- dapps/Cargo.toml | 32 ++--- dapps/src/api/api.rs | 49 ++++---- dapps/src/lib.rs | 90 +++++++------- dapps/src/router/host_validation.rs | 15 +-- dapps/src/router/mod.rs | 30 ++--- dapps/src/rpc.rs | 45 ++++--- dapps/src/tests/api.rs | 46 +++---- dapps/src/tests/helpers/mod.rs | 38 +++--- dapps/src/tests/redirection.rs | 22 ++-- ipfs/src/lib.rs | 76 +++++------- parity/dapps.rs | 27 ++--- parity/ipfs.rs | 69 +++++++---- parity/rpc.rs | 27 ++--- parity/run.rs | 3 - parity/signer.rs | 17 +-- rpc/Cargo.toml | 13 +- rpc/src/lib.rs | 102 ++++++++-------- rpc/src/v1/tests/mocked/eth.rs | 6 +- rpc/src/v1/tests/mocked/parity.rs | 8 +- rpc/src/v1/tests/mocked/parity_accounts.rs | 2 +- signer/Cargo.toml | 1 + signer/src/lib.rs | 11 +- signer/src/tests/mod.rs | 9 +- signer/src/ws_server/mod.rs | 26 ++-- signer/src/ws_server/session.rs | 21 ++-- stratum/src/lib.rs | 28 +++-- util/reactor/src/lib.rs | 4 +- 28 files changed, 469 insertions(+), 480 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b54bd27db..3877467a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ dependencies = [ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -447,8 +447,9 @@ dependencies = [ "fetch 0.1.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 6.0.0 
(git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -621,10 +622,10 @@ dependencies = [ "ethsync 1.7.0", "fetch 0.1.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -667,7 +668,8 @@ dependencies = [ "ethcore-io 1.7.0", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-ui 1.7.0", @@ -687,9 +689,9 @@ dependencies = [ "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", "futures 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", @@ -1083,38 +1085,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-http-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 
(git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-ipc-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1124,26 +1125,44 @@ dependencies = [ [[package]] name = "jsonrpc-macros" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = 
"jsonrpc-tcp-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +name = "jsonrpc-pubsub" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jsonrpc-server-utils" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jsonrpc-tcp-server" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1640,7 +1659,7 @@ dependencies = [ "ethcore 1.7.0", "ethcore-util 
1.7.0", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1677,7 +1696,7 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-util 1.7.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2198,11 +2217,6 @@ name = "smallvec" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "smallvec" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "smallvec" version = "0.3.1" @@ -2273,11 +2287,6 @@ dependencies = [ name = "table" version = "0.1.0" -[[package]] -name = "take" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "target_info" version = "0.1.0" @@ -2352,22 +2361,6 @@ dependencies = [ "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "tokio-proto" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "tokio-service" version = "0.1.0" @@ -2626,11 +2619,13 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum kernel32-sys 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" @@ -2737,7 +2732,6 @@ dependencies = [ "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410" -"checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" "checksum smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3c84984c278afe61a46e19868e8b23e2ee3be5b3cc6dea6edad4893bc6c841" "checksum solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "172382bac9424588d7840732b250faeeef88942e37b6e35317dce98cafdd75b2" "checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf" @@ -2746,7 +2740,6 @@ dependencies = [ "checksum syn 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f94368aae82bb29656c98443a7026ca931a659e8d19dcdc41d6e273054e820" "checksum syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "393b6dd0889df2b064beeea954cfda6bc2571604ac460deeae0fed55a53988af" "checksum syntex_syntax 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"44bded3cabafc65c90b663b1071bd2d198a9ab7515e6ce729e4570aaf53c407e" -"checksum take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" "checksum term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "f2077e54d38055cf1ca0fd7933a2e00cd3ec8f6fed352b2a377f06dcdaaf3281" @@ -2756,7 +2749,6 @@ dependencies = [ "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" "checksum tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52416b3e937abac22a543a7f1c66bd37feb60137ff1ab42390fa02df85347e58" -"checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 508fbc1a0..57fcf21de 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -8,33 +8,37 @@ authors = ["Parity Technologies "] [lib] [dependencies] -rand = "0.3" -log = "0.3" 
+base32 = "0.3" env_logger = "0.3" futures = "0.1" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } -unicase = "1.3" -url = "1.0" -rustc-serialize = "0.3" -serde = "0.9" -serde_json = "0.9" -serde_derive = "0.9" linked-hash-map = "0.3" -parity-dapps-glue = "1.4" -base32 = "0.3" +log = "0.3" mime = "0.2" mime_guess = "1.6.1" +rand = "0.3" +rustc-serialize = "0.3" +serde = "0.9" +serde_derive = "0.9" +serde_json = "0.9" time = "0.1.35" +unicase = "1.3" +url = "1.0" zip = { version = "0.1", default-features = false } + +hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } +jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } +# TODO [ToDr] Temporary solution, server should be merged with RPC. +jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git" } + ethcore-devtools = { path = "../devtools" } ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } fetch = { path = "../util/fetch" } -parity-ui = { path = "./ui" } +parity-dapps-glue = "1.4" parity-hash-fetch = { path = "../hash-fetch" } parity-reactor = { path = "../util/reactor" } +parity-ui = { path = "./ui" } clippy = { version = "0.0.103", optional = true} diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 9106e0d70..ce8f495e6 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -19,7 +19,6 @@ use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::header; use hyper::method::Method; -use hyper::header::AccessControlAllowOrigin; use api::types::{App, ApiError}; use api::response; @@ -27,23 +26,20 @@ use apps::fetcher::Fetcher; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server::cors; 
+use jsonrpc_http_server; +use jsonrpc_server_utils::cors; #[derive(Clone)] pub struct RestApi { - cors_domains: Option>, + cors_domains: Option>, endpoints: Arc, fetcher: Arc, } impl RestApi { - pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { + pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { Box::new(RestApi { - cors_domains: Some(cors_domains.into_iter().map(|domain| match domain.as_ref() { - "all" | "*" | "any" => AccessControlAllowOrigin::Any, - "null" => AccessControlAllowOrigin::Null, - other => AccessControlAllowOrigin::Value(other.into()), - }).collect()), + cors_domains: Some(cors_domains), endpoints: endpoints, fetcher: fetcher, }) @@ -64,7 +60,7 @@ impl Endpoint for RestApi { struct RestApiRouter { api: RestApi, - origin: Option, + cors_header: Option, path: Option, control: Option, handler: Box, @@ -74,7 +70,7 @@ impl RestApiRouter { fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), - origin: None, + cors_header: None, control: Some(control), api: api, handler: response::as_json_error(&ApiError { @@ -95,21 +91,22 @@ impl RestApiRouter { } /// Returns basic headers for a response (it may be overwritten by the handler) - fn response_headers(&self) -> header::Headers { + fn response_headers(cors_header: Option) -> header::Headers { let mut headers = header::Headers::new(); - headers.set(header::AccessControlAllowCredentials); - headers.set(header::AccessControlAllowMethods(vec![ - Method::Options, - Method::Post, - Method::Get, - ])); - headers.set(header::AccessControlAllowHeaders(vec![ - UniCase("origin".to_owned()), - UniCase("content-type".to_owned()), - UniCase("accept".to_owned()), - ])); - if let Some(cors_header) = cors::get_cors_header(&self.api.cors_domains, &self.origin) { + if let Some(cors_header) = cors_header { + headers.set(header::AccessControlAllowCredentials); + headers.set(header::AccessControlAllowMethods(vec![ + Method::Options, + 
Method::Post, + Method::Get, + ])); + headers.set(header::AccessControlAllowHeaders(vec![ + UniCase("origin".to_owned()), + UniCase("content-type".to_owned()), + UniCase("accept".to_owned()), + ])); + headers.set(cors_header); } @@ -120,7 +117,7 @@ impl RestApiRouter { impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { - self.origin = cors::read_origin(&request); + self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains); if let Method::Options = *request.method() { self.handler = response::empty(); @@ -164,7 +161,7 @@ impl server::Handler for RestApiRouter { } fn on_response(&mut self, res: &mut server::Response) -> Next { - *res.headers_mut() = self.response_headers(); + *res.headers_mut() = Self::response_headers(self.cors_header.take()); self.handler.on_response(res) } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 30c62a031..eca6fd991 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -20,25 +20,28 @@ #![cfg_attr(feature="nightly", plugin(clippy))] extern crate base32; +extern crate futures; extern crate hyper; -extern crate time; -extern crate url as url_lib; -extern crate unicase; +extern crate linked_hash_map; +extern crate mime_guess; +extern crate rand; +extern crate rustc_serialize; extern crate serde; extern crate serde_json; +extern crate time; +extern crate unicase; +extern crate url as url_lib; extern crate zip; -extern crate rand; + extern crate jsonrpc_core; extern crate jsonrpc_http_server; -extern crate mime_guess; -extern crate rustc_serialize; +extern crate jsonrpc_server_utils; + extern crate ethcore_rpc; extern crate ethcore_util as util; -extern crate parity_hash_fetch as hash_fetch; -extern crate linked_hash_map; extern crate fetch; extern crate parity_dapps_glue as parity_dapps; -extern crate futures; +extern crate parity_hash_fetch as hash_fetch; extern crate parity_reactor; #[macro_use] @@ -68,17 +71,19 @@ mod web; mod tests; use 
std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use std::net::SocketAddr; use std::collections::HashMap; -use ethcore_rpc::{Metadata}; +use jsonrpc_core::{Middleware, MetaIoHandler}; +use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; +pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; + +use ethcore_rpc::Metadata; use fetch::{Fetch, Client as FetchClient}; use hash_fetch::urlhint::ContractClient; -use jsonrpc_core::Middleware; -use jsonrpc_core::reactor::RpcHandler; -use router::auth::{Authorization, NoAuth, HttpBasicAuth}; use parity_reactor::Remote; +use router::auth::{Authorization, NoAuth, HttpBasicAuth}; use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; @@ -110,8 +115,8 @@ pub struct ServerBuilder { sync_status: Arc, web_proxy_tokens: Arc, signer_address: Option<(String, u16)>, - allowed_hosts: Option>, - extra_cors: Option>, + allowed_hosts: Option>, + extra_cors: Option>, remote: Remote, fetch: Option, } @@ -172,15 +177,15 @@ impl ServerBuilder { /// Change allowed hosts. /// `None` - All hosts are allowed /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: Option>) -> Self { - self.allowed_hosts = allowed_hosts; + pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { + self.allowed_hosts = allowed_hosts.into(); self } /// Extra cors headers. /// `None` - no additional CORS URLs - pub fn extra_cors_headers(mut self, cors: Option>) -> Self { - self.extra_cors = cors; + pub fn extra_cors_headers(mut self, cors: DomainsValidation) -> Self { + self.extra_cors = cors.into(); self } @@ -192,7 +197,7 @@ impl ServerBuilder { /// Asynchronously start server with no authentication, /// returns result with `Server` handle on success or an error. 
- pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: RpcHandler) -> Result { + pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { let fetch = self.fetch_client()?; Server::start_http( addr, @@ -207,13 +212,14 @@ impl ServerBuilder { self.sync_status, self.web_proxy_tokens, self.remote, + tokio_remote, fetch, ) } /// Asynchronously start server with `HTTP Basic Authentication`, /// return result with `Server` handle on success or an error. - pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: RpcHandler) -> Result { + pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { let fetch = self.fetch_client()?; Server::start_http( addr, @@ -228,6 +234,7 @@ impl ServerBuilder { self.sync_status, self.web_proxy_tokens, self.remote, + tokio_remote, fetch, ) } @@ -243,12 +250,11 @@ impl ServerBuilder { /// Webapps HTTP server. pub struct Server { server: Option, - panic_handler: Arc () + Send>>>>, } impl Server { /// Returns a list of allowed hosts or `None` if all hosts are allowed. - fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { + fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { let mut allowed = Vec::new(); match hosts { @@ -263,16 +269,19 @@ impl Server { } /// Returns a list of CORS domains for API endpoint. 
- fn cors_domains(signer_address: Option<(String, u16)>, extra_cors: Option>) -> Vec { + fn cors_domains( + signer_address: Option<(String, u16)>, + extra_cors: Option>, + ) -> Vec { let basic_cors = match signer_address { - Some(signer_address) => vec![ + Some(signer_address) => [ format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), format!("http://{}", address(&signer_address)), format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), format!("https://{}", address(&signer_address)), - ], + ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), None => vec![], }; @@ -284,10 +293,10 @@ impl Server { fn start_http>( addr: &SocketAddr, - hosts: Option>, - extra_cors: Option>, + hosts: Option>, + extra_cors: Option>, authorization: A, - handler: RpcHandler, + handler: MetaIoHandler, dapps_path: PathBuf, extra_dapps: Vec, signer_address: Option<(String, u16)>, @@ -295,9 +304,9 @@ impl Server { sync_status: Arc, web_proxy_tokens: Arc, remote: Remote, + tokio_remote: TokioRemote, fetch: F, ) -> Result { - let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), @@ -318,7 +327,7 @@ impl Server { let special = Arc::new({ let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, cors_domains.clone(), panic_handler.clone())); + special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone())); special.insert(router::SpecialEndpoint::Utils, apps::utils()); special.insert( router::SpecialEndpoint::Api, @@ -346,17 +355,11 @@ impl Server { Server { server: Some(l), - panic_handler: panic_handler, } }) .map_err(ServerError::from) } - /// Set callback for panics. 
- pub fn set_panic_handler(&self, handler: F) where F : Fn() -> () + Send + 'static { - *self.panic_handler.lock().unwrap() = Some(Box::new(handler)); - } - #[cfg(test)] /// Returns address that this server is bound to. pub fn addr(&self) -> &SocketAddr { @@ -408,6 +411,7 @@ fn address(address: &(String, u16)) -> String { #[cfg(test)] mod util_tests { use super::Server; + use jsonrpc_http_server::AccessControlAllowOrigin; #[test] fn should_return_allowed_hosts() { @@ -432,18 +436,18 @@ mod util_tests { // when let none = Server::cors_domains(None, None); let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); - let extra = Server::cors_domains(None, Some(vec!["all".to_owned()])); + let extra = Server::cors_domains(None, Some(vec!["all".into()])); // then - assert_eq!(none, Vec::::new()); + assert_eq!(none, Vec::::new()); assert_eq!(some, vec![ - "http://parity.web3.site".to_owned(), + "http://parity.web3.site".into(), "http://parity.web3.site:18180".into(), "http://127.0.0.1:18180".into(), "https://parity.web3.site".into(), "https://parity.web3.site:18180".into(), - "https://127.0.0.1:18180".into() + "https://127.0.0.1:18180".into(), ]); - assert_eq!(extra, vec!["all".to_owned()]); + assert_eq!(extra, vec![AccessControlAllowOrigin::Any]); } } diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs index 9f40c177e..e5fcedd94 100644 --- a/dapps/src/router/host_validation.rs +++ b/dapps/src/router/host_validation.rs @@ -19,18 +19,13 @@ use apps::DAPPS_DOMAIN; use hyper::{server, header, StatusCode}; use hyper::net::HttpStream; -use jsonrpc_http_server::{is_host_header_valid}; use handlers::ContentHandler; +use jsonrpc_http_server; +use jsonrpc_server_utils::hosts; -pub fn is_valid(request: &server::Request, allowed_hosts: &[String], endpoints: Vec) -> bool { - let mut endpoints = endpoints.iter() - .map(|endpoint| format!("{}{}", endpoint, DAPPS_DOMAIN)) - .collect::>(); - endpoints.extend_from_slice(allowed_hosts); - - 
let header_valid = is_host_header_valid(request, &endpoints); - - match (header_valid, request.headers().get::()) { +pub fn is_valid(req: &server::Request, allowed_hosts: &Option>) -> bool { + let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts); + match (header_valid, req.headers().get::()) { (true, _) => true, (_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN), _ => false, diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index f34151552..0b4e632a6 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -24,14 +24,16 @@ use address; use std::cmp; use std::sync::Arc; use std::collections::HashMap; + use url::{Url, Host}; use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; use hyper::net::HttpStream; +use jsonrpc_server_utils::hosts; + use apps::{self, DAPPS_DOMAIN}; use apps::fetcher::Fetcher; use endpoint::{Endpoint, Endpoints, EndpointPath}; use handlers::{self, Redirection, ContentHandler}; -use self::auth::{Authorization, Authorized}; /// Special endpoints are accessible on every domain (every dapp) #[derive(Debug, PartialEq, Hash, Eq)] @@ -42,18 +44,18 @@ pub enum SpecialEndpoint { None, } -pub struct Router { +pub struct Router { control: Option, signer_address: Option<(String, u16)>, endpoints: Arc, fetch: Arc, special: Arc>>, authorization: Arc, - allowed_hosts: Option>, + allowed_hosts: Option>, handler: Box + Send>, } -impl server::Handler for Router { +impl server::Handler for Router { fn on_request(&mut self, req: server::Request) -> Next { // Choose proper handler depending on path / domain @@ -66,20 +68,18 @@ impl server::Handler for Router { trace!(target: "dapps", "Routing request to {:?}. 
Details: {:?}", url, req); // Validate Host header - if let Some(ref hosts) = self.allowed_hosts { - trace!(target: "dapps", "Validating host headers against: {:?}", hosts); - let is_valid = is_utils || host_validation::is_valid(&req, hosts, self.endpoints.keys().cloned().collect()); - if !is_valid { - debug!(target: "dapps", "Rejecting invalid host header."); - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); - } + trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts); + let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts); + if !is_valid { + debug!(target: "dapps", "Rejecting invalid host header."); + self.handler = host_validation::host_invalid_response(); + return self.handler.on_request(req); } trace!(target: "dapps", "Checking authorization."); // Check authorization let auth = self.authorization.is_authorized(&req); - if let Authorized::No(handler) = auth { + if let auth::Authorized::No(handler) = auth { debug!(target: "dapps", "Authorization denied."); self.handler = handler; return self.handler.on_request(req); @@ -181,7 +181,7 @@ impl server::Handler for Router { } } -impl Router { +impl Router { pub fn new( control: Control, signer_address: Option<(String, u16)>, @@ -189,7 +189,7 @@ impl Router { endpoints: Arc, special: Arc>>, authorization: Arc, - allowed_hosts: Option>, + allowed_hosts: Option>, ) -> Self { let handler = special.get(&SpecialEndpoint::Utils) diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index cc6f4d81a..0c95051e4 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -14,46 +14,57 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::sync::{Arc, Mutex}; +use std::sync::Arc; use hyper; use ethcore_rpc::{Metadata, Origin}; -use jsonrpc_core::Middleware; -use jsonrpc_core::reactor::RpcHandler; -use jsonrpc_http_server::{Rpc, ServerHandler, PanicHandler, AccessControlAllowOrigin, HttpMetaExtractor}; +use jsonrpc_core::{Middleware, MetaIoHandler}; +use jsonrpc_http_server::{self as http, AccessControlAllowOrigin, HttpMetaExtractor}; +use jsonrpc_http_server::tokio_core::reactor::Remote; use endpoint::{Endpoint, EndpointPath, Handler}; pub fn rpc>( - handler: RpcHandler, - cors_domains: Vec, - panic_handler: Arc () + Send>>>>, + handler: MetaIoHandler, + remote: Remote, + cors_domains: Vec, ) -> Box { Box::new(RpcEndpoint { - handler: handler, + handler: Arc::new(handler), + remote: remote, meta_extractor: Arc::new(MetadataExtractor), - panic_handler: panic_handler, - cors_domain: Some(cors_domains.into_iter().map(AccessControlAllowOrigin::Value).collect()), + cors_domain: Some(cors_domains), // NOTE [ToDr] We don't need to do any hosts validation here. It's already done in router. 
allowed_hosts: None, }) } struct RpcEndpoint> { - handler: RpcHandler, + handler: Arc>, + remote: Remote, meta_extractor: Arc>, - panic_handler: Arc () + Send>>>>, cors_domain: Option>, - allowed_hosts: Option>, + allowed_hosts: Option>, +} + +#[derive(Default)] +struct NoopMiddleware; +impl http::RequestMiddleware for NoopMiddleware { + fn on_request(&self, _request: &hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed + } } impl> Endpoint for RpcEndpoint { fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { - let panic_handler = PanicHandler { handler: self.panic_handler.clone() }; - Box::new(ServerHandler::new( - Rpc::new(self.handler.clone(), self.meta_extractor.clone()), + Box::new(http::ServerHandler::new( + http::Rpc { + handler: self.handler.clone(), + remote: self.remote.clone(), + extractor: self.meta_extractor.clone(), + }, self.cors_domain.clone(), self.allowed_hosts.clone(), - panic_handler, + Arc::new(NoopMiddleware), control, )) } diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 1b9f64b7f..73467e854 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -33,8 +33,8 @@ fn should_return_error() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 404 Not Found"); + response.assert_header("Content-Type", "application/json"); assert_eq!(response.body, format!("58\n{}\n0\n\n", r#"{"code":"404","title":"Not Found","detail":"Resource you requested has not been found."}"#)); assert_security_headers(&response.headers); } @@ -56,8 +56,8 @@ fn should_serve_apps() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", 
"application/json"); assert!(response.body.contains("Parity UI"), response.body); assert_security_headers(&response.headers); } @@ -79,8 +79,8 @@ fn should_handle_ping() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", "application/json"); assert_eq!(response.body, "0\n\n".to_owned()); assert_security_headers(&response.headers); } @@ -102,7 +102,7 @@ fn should_try_to_resolve_dapp() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_eq!(registrar.calls.lock().len(), 2); assert_security_headers(&response.headers); } @@ -125,12 +125,8 @@ fn should_return_signer_port_cors_headers() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://127.0.0.1:18180"), - "CORS header for signer missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://127.0.0.1:18180"); } #[test] @@ -151,12 +147,8 @@ fn should_return_signer_port_cors_headers_for_home_parity() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://parity.web3.site"), - "CORS header for parity.web3.site missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site"); } @@ -178,12 +170,8 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_https() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: https://parity.web3.site"), - "CORS header for 
parity.web3.site missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "https://parity.web3.site"); } #[test] @@ -204,12 +192,8 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://parity.web3.site:18180"), - "CORS header for parity.web3.site missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); } #[test] diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index d1a1e9900..d1466c77c 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -21,13 +21,12 @@ use std::sync::Arc; use env_logger::LogBuilder; use ethcore_rpc::Metadata; use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::reactor::RpcEventLoop; use ServerBuilder; use Server; use fetch::Fetch; use devtools::http_client; -use parity_reactor::Remote; +use parity_reactor::{EventLoop, Remote}; mod registrar; mod fetch; @@ -48,7 +47,7 @@ fn init_logger() { pub struct ServerLoop { pub server: Server, - pub event_loop: RpcEventLoop, + pub event_loop: EventLoop, } impl Deref for ServerLoop { @@ -70,13 +69,12 @@ pub fn init_server(process: F, io: MetaIoHandler, remote: Remote // TODO [ToDr] When https://github.com/ethcore/jsonrpc/issues/26 is resolved // this additional EventLoop wouldn't be needed, we should be able to re-use remote. 
- let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(io)); + let event_loop = EventLoop::spawn(); let server = process(ServerBuilder::new( &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), handler).unwrap(); + .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); ( ServerLoop { server: server, event_loop: event_loop }, registrar, @@ -89,12 +87,12 @@ pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { let mut dapps_path = env::temp_dir(); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(MetaIoHandler::default())); - let server = ServerBuilder::new(&dapps_path, registrar, Remote::new(event_loop.remote())) + let event_loop = EventLoop::spawn(); + let io = MetaIoHandler::default(); + let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote()) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .allowed_hosts(None) - .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, handler).unwrap(); + .allowed_hosts(None.into()) + .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap(); ServerLoop { server: server, event_loop: event_loop, @@ -102,26 +100,28 @@ pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { } pub fn serve_with_rpc(io: MetaIoHandler) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None), io, Remote::new_sync()).0 + init_server(|builder| builder.allowed_hosts(None.into()), io, Remote::new_sync()).0 } pub fn serve_hosts(hosts: Option>) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(hosts), Default::default(), Remote::new_sync()).0 + let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect()); + init_server(|builder| 
builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } pub fn serve_extra_cors(extra_cors: Option>) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None).extra_cors_headers(extra_cors), Default::default(), Remote::new_sync()).0 + let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect()); + init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0 } pub fn serve_with_registrar() -> (ServerLoop, Arc) { - init_server(|builder| builder.allowed_hosts(None), Default::default(), Remote::new_sync()) + init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) } pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { init_server(|builder| { builder .sync_status(Arc::new(|| true)) - .allowed_hosts(None) + .allowed_hosts(None.into()) }, Default::default(), Remote::new_sync()) } @@ -133,7 +133,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { - builder.allowed_hosts(None).fetch(f.clone()) + builder.allowed_hosts(None.into()).fetch(f.clone()) }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); (server, fetch, reg) @@ -144,7 +144,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { let f = fetch.clone(); let (server, _) = init_server(move |builder| { builder - .allowed_hosts(None) + .allowed_hosts(None.into()) .fetch(f.clone()) .web_proxy_tokens(Arc::new(move |token| &token == web_token)) }, Default::default(), Remote::new_sync()); @@ -153,7 +153,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { } pub fn serve() -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None), Default::default(), Remote::new_sync()).0 + 
init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 } pub fn request(server: ServerLoop, request: &str) -> http_client::Response { diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 8b529a851..4e3fff4dc 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -32,7 +32,7 @@ fn should_redirect_to_home() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -52,7 +52,7 @@ fn should_redirect_to_home_when_trailing_slash_is_missing() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -72,7 +72,7 @@ fn should_redirect_to_home_for_users_with_cached_redirection() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -92,7 +92,7 @@ fn should_display_404_on_invalid_dapp() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_security_headers_for_embed(&response.headers); } @@ -112,7 +112,7 @@ fn should_display_404_on_invalid_dapp_with_domain() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_security_headers_for_embed(&response.headers); } @@ -134,8 +134,8 @@ fn should_serve_rpc() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + 
response.assert_status("HTTP/1.1 200 OK"); + assert_eq!(response.body, format!("4C\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"},"id":null}"#)); } #[test] @@ -156,8 +156,8 @@ fn should_serve_rpc_at_slash_rpc() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + response.assert_status("HTTP/1.1 200 OK"); + assert_eq!(response.body, format!("4C\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"},"id":null}"#)); } @@ -178,7 +178,7 @@ fn should_serve_proxy_pac() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body, "DD\n\nfunction FindProxyForURL(url, host) {\n\tif (shExpMatch(host, \"parity.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:18180\";\n\t}\n\n\tif (shExpMatch(host, \"*.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:8080\";\n\t}\n\n\treturn \"DIRECT\";\n}\n\n0\n\n".to_owned()); assert_security_headers(&response.headers); } @@ -200,7 +200,7 @@ fn should_serve_utils() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body.contains("function(){"), true); assert_security_headers(&response.headers); } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 80e910f9e..287abb27c 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -33,13 +33,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use jsonrpc_http_server::cors; use hyper::server::{Listening, Handler, Request, Response}; use hyper::net::HttpStream; -use hyper::header::{Vary, ContentLength, ContentType, AccessControlAllowOrigin}; +use hyper::header::{self, Vary, ContentLength, ContentType}; use hyper::{Next, Encoder, Decoder, Method, 
RequestUri, StatusCode}; use ethcore::client::BlockChainClient; +pub use jsonrpc_http_server::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler pub struct IpfsHandler { @@ -47,12 +47,12 @@ pub struct IpfsHandler { out: Out, /// How many bytes from the response have been written out_progress: usize, - /// Origin request header - origin: Option, + /// CORS response header + cors_header: Option, /// Allowed CORS domains cors_domains: Option>, /// Hostnames allowed in the `Host` request header - allowed_hosts: Option>, + allowed_hosts: Option>, /// Reference to the Blockchain Client client: Arc, } @@ -62,50 +62,27 @@ impl IpfsHandler { &*self.client } - pub fn new(cors: Option>, hosts: Option>, client: Arc) -> Self { - fn origin_to_header(origin: String) -> AccessControlAllowOrigin { - match origin.as_str() { - "*" => AccessControlAllowOrigin::Any, - "null" | "" => AccessControlAllowOrigin::Null, - _ => AccessControlAllowOrigin::Value(origin), - } - } - + pub fn new(cors: DomainsValidation, hosts: DomainsValidation, client: Arc) -> Self { IpfsHandler { out: Out::Bad("Invalid Request"), out_progress: 0, - origin: None, - cors_domains: cors.map(|vec| vec.into_iter().map(origin_to_header).collect()), - allowed_hosts: hosts, + cors_header: None, + cors_domains: cors.into(), + allowed_hosts: hosts.into(), client: client, } } - fn is_host_allowed(&self, req: &Request) -> bool { - match self.allowed_hosts { - Some(ref hosts) => jsonrpc_http_server::is_host_header_valid(&req, hosts), - None => true, + fn is_origin_allowed(&self, origin_provided: bool) -> bool { + match (origin_provided, self.cors_header.as_ref()) { + // Request without Origin are always OK. + (false, _) => true, + // If there is a cors header to be returned it's ok. + (true, Some(_)) => true, + // If origin is provided and we won't return cors header it's bad. 
+ (true, None) => false, } } - - fn is_origin_allowed(&self) -> bool { - // Check origin header first, no header passed is good news - let origin = match self.origin { - Some(ref origin) => origin, - None => return true, - }; - - let cors_domains = match self.cors_domains { - Some(ref domains) => domains, - None => return false, - }; - - cors_domains.iter().any(|domain| match *domain { - AccessControlAllowOrigin::Value(ref allowed) => origin == allowed, - AccessControlAllowOrigin::Any => true, - AccessControlAllowOrigin::Null => origin == "", - }) - } } /// Implement Hyper's HTTP handler @@ -115,15 +92,15 @@ impl Handler for IpfsHandler { return Next::write(); } - self.origin = cors::read_origin(&req); + self.cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); - if !self.is_host_allowed(&req) { + if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); return Next::write(); } - if !self.is_origin_allowed() { + if !self.is_origin_allowed(req.headers().get::().is_some()) { self.out = Out::Bad("Disallowed Origin header"); return Next::write(); @@ -176,7 +153,7 @@ impl Handler for IpfsHandler { } } - if let Some(cors_header) = cors::get_cors_header(&self.cors_domains, &self.origin) { + if let Some(cors_header) = self.cors_header.take() { res.headers_mut().set(cors_header); res.headers_mut().set(Vary::Items(vec!["Origin".into()])); } @@ -219,11 +196,11 @@ fn write_chunk(transport: &mut W, progress: &mut usize, data: &[u8]) - } /// Add current interface (default: "127.0.0.1:5001") to list of allowed hosts -fn include_current_interface(mut hosts: Vec, interface: String, port: u16) -> Vec { +fn include_current_interface(mut hosts: Vec, interface: String, port: u16) -> Vec { hosts.push(match port { 80 => interface, _ => format!("{}:{}", interface, port), - }); + }.into()); hosts } @@ -231,14 +208,15 @@ fn include_current_interface(mut hosts: Vec, interface: String, port: u1 pub fn 
start_server( port: u16, interface: String, - cors: Option>, - hosts: Option>, + cors: DomainsValidation, + hosts: DomainsValidation, client: Arc ) -> Result { let ip: IpAddr = interface.parse().map_err(|_| ServerError::InvalidInterface)?; let addr = SocketAddr::new(ip, port); - let hosts = hosts.map(move |hosts| include_current_interface(hosts, interface, port)); + let hosts: Option> = hosts.into(); + let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); Ok( hyper::Server::http(&addr)? diff --git a/parity/dapps.rs b/parity/dapps.rs index b9094c16d..bbd5f4960 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -23,9 +23,8 @@ use ethcore_rpc::informant::RpcStats; use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; use helpers::replace_home; -use io::PanicHandler; -use jsonrpc_core::reactor::Remote; use rpc_apis::{self, SignerService}; +use parity_reactor; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { @@ -60,11 +59,10 @@ impl Default for Configuration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, pub client: Arc, pub sync: Arc, - pub remote: Remote, + pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, pub stats: Arc, @@ -137,9 +135,9 @@ mod server { use ansi_term::Colour; use ethcore::transaction::{Transaction, Action}; use ethcore::client::{Client, BlockChainClient, BlockId}; + use ethcore_dapps::{AccessControlAllowOrigin, Host}; use ethcore_rpc::is_major_importing; use hash_fetch::urlhint::ContractClient; - use jsonrpc_core::reactor::RpcHandler; use parity_reactor; use rpc_apis; @@ -162,6 +160,8 @@ mod server { Arc::new(Registrar { client: deps.client.clone() }), parity_reactor::Remote::new(deps.remote.clone()), ); + let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + let cors: Option> = cors.map(|cors| 
cors.into_iter().map(AccessControlAllowOrigin::from).collect()); let sync = deps.sync.clone(); let client = deps.client.clone(); @@ -172,8 +172,8 @@ mod server { .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) .extra_dapps(&extra_dapps) .signer_address(deps.signer.address()) - .allowed_hosts(allowed_hosts) - .extra_cors_headers(cors); + .allowed_hosts(allowed_hosts.into()) + .extra_cors_headers(cors.into()); let api_set = if all_apis { warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Dapps with all APIs exposed.")); @@ -183,13 +183,12 @@ mod server { rpc_apis::ApiSet::UnsafeContext }; let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); - let handler = RpcHandler::new(Arc::new(apis), deps.remote); let start_result = match auth { None => { - server.start_unsecured_http(url, handler) + server.start_unsecured_http(url, apis, deps.remote) }, Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password, handler) + server.start_basic_auth_http(url, &username, &password, apis, deps.remote) }, }; @@ -199,13 +198,7 @@ mod server { _ => Err(format!("WebApps io error: {}", err)), }, Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => { - let ph = deps.panic_handler; - server.set_panic_handler(move || { - ph.notify_all("Panic in WebApp thread.".to_owned()); - }); - Ok(server) - }, + Ok(server) => Ok(server), } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index e33dcf68b..760868f91 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -1,40 +1,59 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + use std::sync::Arc; -use parity_ipfs_api; +use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; use hyper::server::Listening; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { - pub enabled: bool, - pub port: u16, - pub interface: String, - pub cors: Option>, - pub hosts: Option>, + pub enabled: bool, + pub port: u16, + pub interface: String, + pub cors: Option>, + pub hosts: Option>, } impl Default for Configuration { - fn default() -> Self { - Configuration { - enabled: false, - port: 5001, - interface: "127.0.0.1".into(), - cors: None, - hosts: Some(Vec::new()), - } - } + fn default() -> Self { + Configuration { + enabled: false, + port: 5001, + interface: "127.0.0.1".into(), + cors: None, + hosts: Some(Vec::new()), + } + } } pub fn start_server(conf: Configuration, client: Arc) -> Result, ServerError> { - if !conf.enabled { - return Ok(None); - } + if !conf.enabled { + return Ok(None); + } - parity_ipfs_api::start_server( - conf.port, - conf.interface, - conf.cors, - conf.hosts, - client - ).map(Some) + let cors = conf.cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); + let hosts = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + + parity_ipfs_api::start_server( + conf.port, + conf.interface, + cors.into(), + hosts.into(), + client + ).map(Some) } diff --git a/parity/rpc.rs b/parity/rpc.rs index 49bd94699..b0af8aa0b 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -18,19 +18,18 @@ use std::fmt; use std::sync::Arc; use std::net::SocketAddr; use std::io; -use 
io::PanicHandler; use dir::default_data_path; -use ethcore_rpc::{self as rpc, RpcServerError, IpcServerError, Metadata, Origin}; +use ethcore_rpc::{self as rpc, HttpServerError, IpcServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; use helpers::parity_ipc_path; use hyper; use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::reactor::{RpcHandler, Remote}; use rpc_apis; use rpc_apis::ApiSet; +use parity_reactor::TokioRemote; -pub use ethcore_rpc::{IpcServer, Server as HttpServer}; +pub use ethcore_rpc::{IpcServer, HttpServer}; #[derive(Debug, PartialEq)] pub struct HttpConfiguration { @@ -84,9 +83,8 @@ impl fmt::Display for IpcConfiguration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, - pub remote: Remote, + pub remote: TokioRemote, pub stats: Arc, } @@ -123,12 +121,13 @@ pub fn setup_http_rpc_server( allowed_hosts: Option>, apis: ApiSet ) -> Result { - let apis = setup_apis(apis, dependencies); - let handler = RpcHandler::new(Arc::new(apis), dependencies.remote.clone()); - let ph = dependencies.panic_handler.clone(); - let start_result = rpc::start_http(url, cors_domains, allowed_hosts, ph, handler, RpcExtractor); + let handler = setup_apis(apis, dependencies); + let remote = dependencies.remote.clone(); + let cors_domains: Option> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); + let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, remote, RpcExtractor); match start_result { - Err(RpcServerError::IoError(err)) => match err.kind() { + Err(HttpServerError::IoError(err)) => match err.kind() { io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and 
--jsonrpc-interface options.", url)), _ => Err(format!("RPC io error: {}", err)), }, @@ -143,9 +142,9 @@ pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result Result, String> { - let apis = setup_apis(apis, dependencies); - let handler = RpcHandler::new(Arc::new(apis), dependencies.remote.clone()); - match rpc::start_ipc(addr, handler) { + let handler = setup_apis(apis, dependencies); + let remote = dependencies.remote.clone(); + match rpc::start_ipc(addr, handler, remote) { Err(IpcServerError::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), Ok(server) => Ok(server) diff --git a/parity/run.rs b/parity/run.rs index b25ed3188..9e5c4f33a 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -434,7 +434,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R }); let dependencies = rpc::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), remote: event_loop.raw_remote(), stats: rpc_stats.clone(), @@ -446,7 +445,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // the dapps server let dapps_deps = dapps::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), client: client.clone(), sync: sync_provider.clone(), @@ -459,7 +457,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // the signer server let signer_deps = signer::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), remote: event_loop.raw_remote(), rpc_stats: rpc_stats.clone(), diff --git a/parity/signer.rs b/parity/signer.rs index 346276496..0d71604d4 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -26,8 +26,7 @@ use ethcore_rpc::informant::RpcStats; use ethcore_rpc; use ethcore_signer as signer; use helpers::replace_home; -use io::{ForwardPanic, PanicHandler}; -use jsonrpc_core::reactor::{RpcHandler, Remote}; +use parity_reactor::TokioRemote; use rpc_apis; use 
util::path::restrict_permissions_owner; use util::H256; @@ -57,9 +56,8 @@ impl Default for Configuration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, - pub remote: Remote, + pub remote: TokioRemote, pub rpc_stats: Arc, } @@ -143,9 +141,9 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result Result Err(format!("Trusted Signer io error: {}", err)), }, Err(e) => Err(format!("Trusted Signer Error: {:?}", e)), - Ok(server) => { - deps.panic_handler.forward_from(&server); - Ok(server) - }, + Ok(server) => Ok(server), } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 13ce8962f..e31255254 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -10,18 +10,20 @@ authors = ["Parity Technologies "] [dependencies] futures = "0.1" log = "0.3" +order-stat = "0.1" +rustc-serialize = "0.3" semver = "0.5" serde = "0.9" -serde_json = "0.9" serde_derive = "0.9" -rustc-serialize = "0.3" +serde_json = "0.9" time = "0.1" transient-hashmap = "0.1" -order-stat = "0.1" + jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-ipc-server = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git" } + ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-util = { path = "../util" } @@ -35,11 +37,12 @@ ethjson = { path = "../json" } ethcore-devtools = { path = "../devtools" } ethcore-light = { path = "../ethcore/light" } parity-updater = { path = "../updater" } +parity-reactor = { path = "../util/reactor" } rlp = { path = "../util/rlp" } fetch = { path = "../util/fetch" } -parity-reactor = { path = "../util/reactor" } -clippy = { version = "0.0.103", optional = true} stats = { path = "../util/stats" } +clippy = { version = "0.0.103", optional = true} + [features] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 
201f41c22..7a9ee5a22 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -19,31 +19,32 @@ #![cfg_attr(feature="nightly", feature(plugin))] #![cfg_attr(feature="nightly", plugin(clippy))] -extern crate semver; -extern crate rustc_serialize; -extern crate serde; -extern crate serde_json; -extern crate jsonrpc_core; -extern crate jsonrpc_http_server; - -extern crate ethcore_io as io; -extern crate ethcore; -extern crate ethkey; -extern crate ethcrypto as crypto; -extern crate ethstore; -extern crate ethsync; -extern crate ethash; -extern crate ethcore_light as light; -extern crate transient_hashmap; -extern crate jsonrpc_ipc_server as ipc; -extern crate ethcore_ipc; -extern crate time; -extern crate rlp; -extern crate fetch; extern crate futures; extern crate order_stat; -extern crate parity_updater as updater; +extern crate rustc_serialize; +extern crate semver; +extern crate serde; +extern crate serde_json; +extern crate time; +extern crate transient_hashmap; + +extern crate jsonrpc_core; +pub extern crate jsonrpc_http_server as http; +pub extern crate jsonrpc_ipc_server as ipc; + +extern crate ethash; +extern crate ethcore; +extern crate ethcore_io as io; +extern crate ethcore_ipc; +extern crate ethcore_light as light; +extern crate ethcrypto as crypto; +extern crate ethkey; +extern crate ethstore; +extern crate ethsync; +extern crate fetch; extern crate parity_reactor; +extern crate parity_updater as updater; +extern crate rlp; extern crate stats; #[macro_use] @@ -60,57 +61,50 @@ extern crate ethjson; #[cfg(test)] extern crate ethcore_devtools as devtools; -use std::sync::Arc; -use std::net::SocketAddr; -use io::PanicHandler; -use jsonrpc_core::reactor::RpcHandler; +pub mod v1; pub use ipc::{Server as IpcServer, Error as IpcServerError}; -pub use jsonrpc_http_server::{ServerBuilder, Server, RpcServerError, HttpMetaExtractor}; -pub mod v1; +pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; + pub use 
v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; pub use v1::block_import::is_major_importing; +use std::net::SocketAddr; +use http::tokio_core; + /// Start http server asynchronously and returns result with `Server` handle on success or an error. -pub fn start_http( +pub fn start_http( addr: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - panic_handler: Arc, - handler: RpcHandler, + cors_domains: http::DomainsValidation, + allowed_hosts: http::DomainsValidation, + handler: H, + remote: tokio_core::reactor::Remote, extractor: T, -) -> Result where +) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, + H: Into>, T: HttpMetaExtractor, { - - let cors_domains = cors_domains.map(|domains| { - domains.into_iter() - .map(|v| match v.as_str() { - "*" => jsonrpc_http_server::AccessControlAllowOrigin::Any, - "null" => jsonrpc_http_server::AccessControlAllowOrigin::Null, - v => jsonrpc_http_server::AccessControlAllowOrigin::Value(v.into()), - }) - .collect() - }); - - ServerBuilder::with_rpc_handler(handler) - .meta_extractor(Arc::new(extractor)) + http::ServerBuilder::new(handler) + .event_loop_remote(remote) + .meta_extractor(extractor) .cors(cors_domains.into()) .allowed_hosts(allowed_hosts.into()) - .panic_handler(move || { - panic_handler.notify_all("Panic in RPC thread.".to_owned()); - }) .start_http(addr) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_ipc>( +pub fn start_ipc( addr: &str, - handler: RpcHandler, -) -> Result, ipc::Error> { - let server = ipc::Server::with_rpc_handler(addr, handler)?; + handler: H, + remote: tokio_core::reactor::Remote, +) -> Result, ipc::Error> where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, +{ + let server = ipc::Server::with_remote(addr, handler, ipc::UninitializedRemote::Shared(remote))?; server.run_async()?; Ok(server) } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 2432b55e7..f6687f818 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -875,7 +875,7 @@ fn rpc_eth_send_transaction_with_bad_to() { "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid length.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid length."},"id":1}"#; assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); } @@ -1058,7 +1058,7 @@ fn rpc_get_work_returns_no_work_if_cant_mine() { eth_tester.client.set_queue_size(10); let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; assert_eq!(eth_tester.io.handle_request_sync(request), Some(response.to_owned())); } @@ -1117,6 +1117,6 @@ fn rpc_get_work_should_timeout() { // Request with timeout of 10 seconds. This should fail. 
let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["10"], "id": 1}"#; - let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed.","data":null},"id":1}"#; + let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed."},"id":1}"#; assert_eq!(eth_tester.io.handle_request_sync(request), Some(err_response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index a587554a3..082f7ef34 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -346,7 +346,7 @@ fn rpc_parity_unsigned_transactions_count_when_signer_disabled() { let io = deps.default_client(); let request = r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } @@ -382,7 +382,7 @@ fn rpc_parity_signer_port() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_signerPort", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":18180,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. 
This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); @@ -400,7 +400,7 @@ fn rpc_parity_dapps_port() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsPort", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":18080,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); @@ -418,7 +418,7 @@ fn rpc_parity_dapps_interface() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsInterface", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1","id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. 
This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index 304ffd45e..72c1aef2c 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -230,7 +230,7 @@ fn should_be_able_to_kill_account() { let address = accounts[0]; let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0xf00baba2f00baba2f00baba2f00baba2f00baba2"], "id": 1}}"#); - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid length 1, expected a tuple of size 2","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid length 1, expected a tuple of size 2"},"id":1}"#; let res = tester.io.handle_request_sync(&request); assert_eq!(res, Some(response.into())); diff --git a/signer/Cargo.toml b/signer/Cargo.toml index ba33bad68..22964104b 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -13,6 +13,7 @@ rustc_version = "0.1" [dependencies] rand = "0.3.14" jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } +jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git" } log = "0.3" env_logger = "0.3" parity-dapps-glue = { version = "1.4", optional = true } diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 5cc103ba8..d211e2eac 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -30,21 +30,23 @@ //! //! ``` //! extern crate jsonrpc_core; +//! extern crate jsonrpc_server_utils; //! extern crate ethcore_signer; //! extern crate ethcore_rpc; //! //! use std::sync::Arc; //! use jsonrpc_core::IoHandler; -//! use jsonrpc_core::reactor::RpcEventLoop; +//! use jsonrpc_server_utils::reactor::RpcEventLoop; //! use ethcore_signer::ServerBuilder; //! use ethcore_rpc::ConfirmationsQueue; //! //! fn main() { //! 
let queue = Arc::new(ConfirmationsQueue::default()); -//! let io = Arc::new(IoHandler::new().into()); -//! let event_loop = RpcEventLoop::spawn(); +//! let io = IoHandler::default(); +//! let event_loop = RpcEventLoop::spawn().unwrap(); +//! let remote = event_loop.remote(); //! let _server = ServerBuilder::new(queue, "/tmp/authcodes".into()) -//! .start("127.0.0.1:8084".parse().unwrap(), event_loop.handler(io)); +//! .start("127.0.0.1:8084".parse().unwrap(), io, remote); //! } //! ``` @@ -57,6 +59,7 @@ extern crate ethcore_util as util; extern crate ethcore_rpc as rpc; extern crate ethcore_io as io; extern crate jsonrpc_core; +extern crate jsonrpc_server_utils; extern crate ws; extern crate ethcore_devtools as devtools; diff --git a/signer/src/tests/mod.rs b/signer/src/tests/mod.rs index 7de3a167a..bc90a6cd3 100644 --- a/signer/src/tests/mod.rs +++ b/signer/src/tests/mod.rs @@ -22,7 +22,7 @@ use devtools::RandomTempPath; use rpc::ConfirmationsQueue; use jsonrpc_core::IoHandler; -use jsonrpc_core::reactor::RpcEventLoop; +use jsonrpc_server_utils::reactor::RpcEventLoop; use rand; use ServerBuilder; @@ -70,9 +70,10 @@ pub fn serve() -> (ServerLoop, usize, GuardedAuthCodes) { let queue = Arc::new(ConfirmationsQueue::default()); let builder = ServerBuilder::new(queue, path.to_path_buf()); let port = 35000 + rand::random::() % 10000; - let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(IoHandler::default().into())); - let server = builder.start(format!("127.0.0.1:{}", port).parse().unwrap(), handler).unwrap(); + let event_loop = RpcEventLoop::spawn().unwrap(); + let io = IoHandler::default(); + let remote = event_loop.remote(); + let server = builder.start(format!("127.0.0.1:{}", port).parse().unwrap(), io, remote).unwrap(); let res = ServerLoop { server: server, event_loop: event_loop, diff --git a/signer/src/ws_server/mod.rs b/signer/src/ws_server/mod.rs index b799b0f66..314351938 100644 --- a/signer/src/ws_server/mod.rs +++ 
b/signer/src/ws_server/mod.rs @@ -26,8 +26,8 @@ use std::thread; use std; use io::{PanicHandler, OnPanicListener, MayPanic}; -use jsonrpc_core::{Metadata, Middleware}; -use jsonrpc_core::reactor::RpcHandler; +use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; +use jsonrpc_server_utils::tokio_core::reactor::Remote; use rpc::{ConfirmationsQueue}; use rpc::informant::RpcStats; @@ -92,21 +92,28 @@ impl ServerBuilder { /// Starts a new `WebSocket` server in separate thread. /// Returns a `Server` handle which closes the server when droped. - pub fn start>(self, addr: SocketAddr, handler: RpcHandler) -> Result { - self.start_with_extractor(addr, handler, NoopExtractor) + pub fn start, H: Into>>( + self, + addr: SocketAddr, + handler: H, + remote: Remote, + ) -> Result { + self.start_with_extractor(addr, handler, remote, NoopExtractor) } /// Starts a new `WebSocket` server in separate thread. /// Returns a `Server` handle which closes the server when droped. - pub fn start_with_extractor, T: session::MetaExtractor>( + pub fn start_with_extractor, H: Into>, T: session::MetaExtractor>( self, addr: SocketAddr, - handler: RpcHandler, + handler: H, + remote: Remote, meta_extractor: T, ) -> Result { Server::start( addr, - handler, + handler.into(), + remote, self.queue, self.authcodes_path, self.skip_origin_validation, @@ -136,7 +143,8 @@ impl Server { /// Returns a `Server` handle which closes the server when droped. 
fn start, T: session::MetaExtractor>( addr: SocketAddr, - handler: RpcHandler, + handler: MetaIoHandler, + remote: Remote, queue: Arc, authcodes_path: PathBuf, skip_origin_validation: bool, @@ -156,7 +164,7 @@ impl Server { let origin = format!("{}", addr); let port = addr.port(); let ws = ws::Builder::new().with_settings(config).build( - session::Factory::new(handler, origin, port, authcodes_path, skip_origin_validation, stats, meta_extractor) + session::Factory::new(handler, remote, origin, port, authcodes_path, skip_origin_validation, stats, meta_extractor) )?; let panic_handler = PanicHandler::new_in_arc(); diff --git a/signer/src/ws_server/session.rs b/signer/src/ws_server/session.rs index 5194855ab..91984ff05 100644 --- a/signer/src/ws_server/session.rs +++ b/signer/src/ws_server/session.rs @@ -21,8 +21,9 @@ use std::sync::Arc; use std::str::FromStr; use authcode_store::AuthCodes; -use jsonrpc_core::{Metadata, Middleware}; -use jsonrpc_core::reactor::RpcHandler; +use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; +use jsonrpc_core::futures::Future; +use jsonrpc_server_utils::tokio_core::reactor::Remote; use rpc::informant::RpcStats; use util::{H256, version}; use ws; @@ -145,7 +146,8 @@ pub struct Session, T> { self_origin: String, self_port: u16, authcodes_path: PathBuf, - handler: RpcHandler, + handler: Arc>, + remote: Remote, file_handler: Arc, stats: Option>, meta_extractor: T, @@ -237,7 +239,7 @@ impl, T: MetaExtractor> ws::Handler for Session // TODO [ToDr] Move to on_connect let metadata = self.meta_extractor.extract_metadata(&self.session_id); - self.handler.handle_request(req, metadata, move |response| { + let future = self.handler.handle_request(req, metadata).map(move |response| { if let Some(result) = response { let res = out.send(result); if let Err(e) = res { @@ -245,12 +247,14 @@ impl, T: MetaExtractor> ws::Handler for Session } } }); + self.remote.spawn(move |_| future); Ok(()) } } pub struct Factory, T> { - handler: RpcHandler, + 
handler: Arc>, + remote: Remote, skip_origin_validation: bool, self_origin: String, self_port: u16, @@ -262,7 +266,8 @@ pub struct Factory, T> { impl, T> Factory { pub fn new( - handler: RpcHandler, + handler: MetaIoHandler, + remote: Remote, self_origin: String, self_port: u16, authcodes_path: PathBuf, @@ -271,7 +276,8 @@ impl, T> Factory { meta_extractor: T, ) -> Self { Factory { - handler: handler, + handler: Arc::new(handler), + remote: remote, skip_origin_validation: skip_origin_validation, self_origin: self_origin, self_port: self_port, @@ -293,6 +299,7 @@ impl, T: MetaExtractor> ws::Factory for Factory session_id: 0.into(), out: sender, handler: self.handler.clone(), + remote: self.remote.clone(), skip_origin_validation: self.skip_origin_validation, self_origin: self.self_origin.clone(), self_port: self.self_port, diff --git a/stratum/src/lib.rs b/stratum/src/lib.rs index 59964773c..8aac33655 100644 --- a/stratum/src/lib.rs +++ b/stratum/src/lib.rs @@ -44,8 +44,8 @@ pub use traits::{ }; use jsonrpc_tcp_server::{ - Server as JsonRpcServer, RequestContext, MetaExtractor, Dispatcher, - PushMessageError + Server as JsonRpcServer, ServerBuilder as JsonRpcServerBuilder, + RequestContext, MetaExtractor, Dispatcher, PushMessageError, }; use jsonrpc_core::{MetaIoHandler, Params, to_value, Value, Metadata, Compatibility}; use jsonrpc_macros::IoDelegate; @@ -57,6 +57,8 @@ use util::{H256, Hashable, RwLock, RwLockReadGuard}; type RpcResult = BoxFuture; +const NOTIFY_COUNTER_INITIAL: u32 = 16; + struct StratumRpc { stratum: RwLock>>, } @@ -112,7 +114,7 @@ impl MetaExtractor for PeerMetaExtractor { } pub struct Stratum { - rpc_server: JsonRpcServer, + rpc_server: Option, /// Subscribed clients subscribers: RwLock>, /// List of workers supposed to receive job update @@ -129,7 +131,11 @@ pub struct Stratum { tcp_dispatcher: Dispatcher, } -const NOTIFY_COUNTER_INITIAL: u32 = 16; +impl Drop for Stratum { + fn drop(&mut self) { + self.rpc_server.take().map(|server| 
server.close()); + } +} impl Stratum { pub fn start( @@ -148,12 +154,14 @@ impl Stratum { let mut handler = MetaIoHandler::::with_compatibility(Compatibility::Both); handler.extend_with(delegate); - let server = JsonRpcServer::new(addr.clone(), Arc::new(handler)) - .extractor(Arc::new(PeerMetaExtractor) as Arc>); + let server = JsonRpcServerBuilder::new(handler) + .session_meta_extractor(PeerMetaExtractor); + let tcp_dispatcher = server.dispatcher(); + let server = server.start(addr)?; let stratum = Arc::new(Stratum { - tcp_dispatcher: server.dispatcher(), - rpc_server: server, + tcp_dispatcher: tcp_dispatcher, + rpc_server: Some(server), subscribers: RwLock::new(Vec::new()), job_que: RwLock::new(HashSet::new()), dispatcher: dispatcher, @@ -162,10 +170,6 @@ impl Stratum { notify_counter: RwLock::new(NOTIFY_COUNTER_INITIAL), }); *rpc.stratum.write() = Some(stratum.clone()); - - let running_stratum = stratum.clone(); - ::std::thread::spawn(move || running_stratum.rpc_server.run()); - Ok(stratum) } diff --git a/util/reactor/src/lib.rs b/util/reactor/src/lib.rs index 73ce9e404..c1d7f8631 100644 --- a/util/reactor/src/lib.rs +++ b/util/reactor/src/lib.rs @@ -24,7 +24,7 @@ use std::thread; use std::sync::mpsc; use std::time::Duration; use futures::{Future, IntoFuture}; -use self::tokio_core::reactor::{Remote as TokioRemote, Timeout}; +pub use tokio_core::reactor::{Remote as TokioRemote, Timeout}; /// Event Loop for futures. /// Wrapper around `tokio::reactor::Core`. 
@@ -47,7 +47,7 @@ impl EventLoop { let remote = rx.recv().expect("tx is transfered to a newly spawned thread."); EventLoop { - remote: Remote{ + remote: Remote { inner: Mode::Tokio(remote), }, handle: EventLoopHandle { From 599f81daa9193529db06c6e2d1df180ce8c2a549 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 13 Mar 2017 16:06:58 +0100 Subject: [PATCH 33/91] split request filling into fill,complete --- ethcore/light/src/types/request/builder.rs | 9 +- ethcore/light/src/types/request/mod.rs | 291 +++++++++++---------- 2 files changed, 155 insertions(+), 145 deletions(-) diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index cdd3a086f..77f1389c2 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -105,9 +105,8 @@ impl Requests { if self.answered == self.requests.len() { None } else { - let outputs = &self.outputs; Some(self.requests[self.answered].clone() - .fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .complete() .expect("All outputs checked as invariant of `Requests` object; qed")) } } @@ -130,6 +129,12 @@ impl Requests { }); self.answered += 1; + + // fill as much of the next request as we can. + if let Some(ref mut req) = self.requests.get_mut(self.answered) { + req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + } + Ok(()) } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 165dff742..1ebe1c75b 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -88,6 +88,16 @@ pub enum Field { BackReference(usize, usize), } +impl Field { + // attempt conversion into scalar value. 
+ fn into_scalar(self) -> Result { + match self { + Field::Scalar(val) => Ok(val), + _ => Err(NoSuchOutput), + } + } +} + impl From for Field { fn from(val: T) -> Self { Field::Scalar(val) @@ -318,19 +328,30 @@ impl IncompleteRequest for Request { } } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - Ok(match self { - Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)?), - Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)?), - Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)?), - Request::Body(req) => CompleteRequest::Body(req.fill(oracle)?), - Request::Account(req) => CompleteRequest::Account(req.fill(oracle)?), - Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)?), - Request::Code(req) => CompleteRequest::Code(req.fill(oracle)?), - Request::Execution(req) => CompleteRequest::Execution(req.fill(oracle)?), - }) + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + match *self { + Request::Headers(ref mut req) => req.fill(oracle), + Request::HeaderProof(ref mut req) => req.fill(oracle), + Request::Receipts(ref mut req) => req.fill(oracle), + Request::Body(ref mut req) => req.fill(oracle), + Request::Account(ref mut req) => req.fill(oracle), + Request::Storage(ref mut req) => req.fill(oracle), + Request::Code(ref mut req) => req.fill(oracle), + Request::Execution(ref mut req) => req.fill(oracle), + } + } + + fn complete(self) -> Result { + match self { + Request::Headers(req) => req.complete().map(CompleteRequest::Headers), + Request::HeaderProof(req) => req.complete().map(CompleteRequest::HeaderProof), + Request::Receipts(req) => req.complete().map(CompleteRequest::Receipts), + Request::Body(req) => req.complete().map(CompleteRequest::Body), + Request::Account(req) => req.complete().map(CompleteRequest::Account), + Request::Storage(req) => req.complete().map(CompleteRequest::Storage), + Request::Code(req) => 
req.complete().map(CompleteRequest::Code), + Request::Execution(req) => req.complete().map(CompleteRequest::Execution), + } } } @@ -486,13 +507,16 @@ pub trait IncompleteRequest: Sized { /// Note that this request will produce the following outputs. fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); - /// Fill the request. + /// Fill fields of the request. /// /// This function is provided an "output oracle" which allows fetching of /// prior request outputs. - /// Only outputs previously checked with `check_outputs` will be available. - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result; + /// Only outputs previously checked with `check_outputs` may be available. + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; + + /// Attempt to convert this request into its complete variant. + /// Will succeed if all fields have been filled, will fail otherwise. + fn complete(self) -> Result; } /// Header request. @@ -551,25 +575,24 @@ pub mod header { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let start = match self.start { - Field::Scalar(start) => start, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash.into(), - Output::Number(num) => num.into(), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.start { + self.start = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + Ok(Output::Number(num)) => Field::Scalar(num.into()), + Err(_) => Field::BackReference(req, idx), } - }; + } + } + fn complete(self) -> Result { Ok(Complete { - start: start, + start: self.start.into_scalar()?, skip: self.skip, max: self.max, reverse: self.reverse, }) } - } /// A complete header request. 
@@ -671,22 +694,20 @@ pub mod header_proof { note(0, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let num = match self.num { - Field::Scalar(num) => num, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Number(num) => num, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.num { + self.num = match oracle(req, idx) { + Ok(Output::Number(num)) => Field::Scalar(num.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - num: num, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + num: self.num.into_scalar()?, + }) + } } /// A complete header proof request. @@ -779,19 +800,18 @@ pub mod block_receipts { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let hash = match self.hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Number(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), } - }; + } + } + fn complete(self) -> Result { Ok(Complete { - hash: hash, + hash: self.hash.into_scalar()?, }) } } @@ -875,22 +895,20 @@ pub mod block_body { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let hash = match self.hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? 
{ - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - hash: hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } } /// A complete block body request. @@ -991,31 +1009,28 @@ pub mod account { f(1, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let address_hash = match self.address_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - address_hash: address_hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + }) + } } /// A complete request for an account. 
@@ -1138,40 +1153,36 @@ pub mod storage { f(0, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let address_hash = match self.address_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let key_hash = match self.key_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.key_hash { + self.key_hash = match oracle(req, idx) { + Ok(Output::Hash(key_hash)) => Field::Scalar(key_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - address_hash: address_hash, - key_hash: key_hash - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + key_hash: self.key_hash.into_scalar()?, + }) + } } /// A complete request for a storage proof. 
@@ -1272,31 +1283,28 @@ pub mod contract_code { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let code_hash = match self.code_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.code_hash { + self.code_hash = match oracle(req, idx) { + Ok(Output::Hash(code_hash)) => Field::Scalar(code_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - code_hash: code_hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + code_hash: self.code_hash.into_scalar()?, + }) + } } /// A complete request. @@ -1411,19 +1419,17 @@ pub mod execution { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? 
{ - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; - + } + } + fn complete(self) -> Result { Ok(Complete { - block_hash: block_hash, + block_hash: self.block_hash.into_scalar()?, from: self.from, action: self.action, gas: self.gas, @@ -1432,7 +1438,6 @@ pub mod execution { data: self.data, }) } - } /// A complete request. From 491eeb9878341336071a0e9de80e4afc758cc5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 16 Mar 2017 12:48:51 +0100 Subject: [PATCH 34/91] Better invalid encoding messages --- Cargo.lock | 310 +++++++++++++++-------------- Cargo.toml | 2 +- dapps/src/api/api.rs | 2 +- dapps/src/rpc.rs | 6 +- ethcore/light/src/on_demand/mod.rs | 18 +- ethcore/src/spec/spec.rs | 2 +- ipfs/Cargo.toml | 2 +- ipfs/src/lib.rs | 16 +- json/src/hash.rs | 8 +- json/src/uint.rs | 10 +- parity/rpc.rs | 20 +- rpc/src/lib.rs | 15 +- rpc/src/v1/impls/signing.rs | 12 +- rpc/src/v1/types/hash.rs | 9 +- rpc/src/v1/types/uint.rs | 16 +- rpc_client/Cargo.toml | 2 +- stratum/Cargo.toml | 7 +- util/reactor/src/lib.rs | 7 +- 18 files changed, 239 insertions(+), 225 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 257872770..8be5bd809 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ dependencies = [ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 
(registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -192,6 +192,15 @@ dependencies = [ "stable-heap 0.1.0 (git+https://github.com/carllerche/stable-heap?rev=3c5cd1ca47)", ] +[[package]] +name = "bytes" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "cfg-if" version = "0.1.0" @@ -445,7 +454,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -555,7 +564,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-network 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -621,7 +630,7 @@ dependencies = [ "ethstore 0.1.0", "ethsync 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -688,15 +697,14 @@ 
dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", "semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -851,7 +859,7 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -870,11 +878,8 @@ dependencies = [ [[package]] name = "futures" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "futures-cpupool" @@ -882,7 +887,7 @@ version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1050,6 +1055,15 @@ name = "integer-encoding" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "iovec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ipc-common-types" version = "1.7.0" @@ -1083,47 +1097,22 @@ name = "itoa" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "jsonrpc-core" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "jsonrpc-http-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1136,33 +1125,19 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "jsonrpc-macros" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1172,7 +1147,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1182,25 +1157,24 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 
(git+https://github.com/ethcore/jsonrpc.git)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-tcp-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1228,6 +1202,11 @@ name = "lazycell" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "lazycell" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "libc" version = "0.2.16" @@ -1316,38 +1295,6 @@ dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] 
-name = "mio" -version = "0.5.1" -source = "git+https://github.com/ethcore/mio?branch=v0.5.x#3842d3b250ffd7bd9b16f9586b875ddcbac2b0dd" -dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "mio" version = "0.6.0-dev" @@ -1356,7 +1303,7 @@ dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)", @@ -1372,7 +1319,7 @@ dependencies = [ "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1381,32 +1328,56 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "mio-named-pipes" +version = "0.1.4" +source = "git+https://github.com/alexcrichton/mio-named-pipes#903dc2f7eac6700c62bfdda258a599db13a9228f" +dependencies = [ + "kernel32-sys 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "mio-uds" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miow" -version = "0.1.3" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miow" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1484,15 +1455,6 @@ dependencies = [ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "nix" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "nix" version = "0.6.0" @@ -1689,7 +1651,7 @@ dependencies = [ "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 
"ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1706,7 +1668,7 @@ dependencies = [ "ethcore 1.7.0", "ethcore-util 1.7.0", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1731,8 +1693,8 @@ dependencies = [ name = "parity-reactor" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1742,8 +1704,8 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-signer 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1753,6 +1715,22 @@ dependencies = [ "ws 0.5.3 
(git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", ] +[[package]] +name = "parity-tokio-ipc" +version = "0.1.0" +source = "git+https://github.com/nikvolf/parity-tokio-ipc#3d4234de6bdc78688ef803935111003080fd5375" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", + "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)", + "tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)", + "tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-ui" version = "1.7.0" @@ -2060,7 +2038,7 @@ dependencies = [ "ethcore-bigint 0.1.2", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc-client 1.4.0", "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2239,11 +2217,6 @@ name = "siphasher" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "slab" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "slab" version = "0.2.0" @@ -2408,29 +2381,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "tokio-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-io" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-line" +version = "0.1.0" +source = "git+https://github.com/tokio-rs/tokio-line#482614ae0c82daf584727ae65a80d854fe861f81" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-named-pipes" +version = "0.1.0" +source = "git+https://github.com/alexcrichton/tokio-named-pipes#3a22f8fc9a441b548aec25bd5df3b1e0ab99fabe" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-proto" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2439,7 +2443,7 @@ name = "tokio-service" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2447,11 +2451,11 @@ name = "tokio-uds" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2662,6 +2666,7 @@ dependencies = [ "checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" +"checksum bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "46112a0060ae15e3a3f9a445428a53e082b91215b744fa27a1948842f4a64b96" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e53e6cdfa5ca294863e8c8a32a7cdb4dc0a442c8971d47a0e75b6c27ea268a6a" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" @@ -2684,7 +2689,7 @@ dependencies = [ "checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c1913eb7083840b1bbcbf9631b7fda55eaf35fe7ead13cca034e8946f9e2bc41" +"checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" "checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82" "checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" @@ -2701,23 
+2706,22 @@ dependencies = [ "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" "checksum igd 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c8c12b1795b8b168f577c45fa10379b3814dcb11b7ab702406001f0d63f40484" "checksum integer-encoding 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a053c9c7dcb7db1f2aa012c37dc176c62e4cdf14898dee0eecc606de835b8acb" +"checksum iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be" "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" 
+"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b" +"checksum lazycell 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ec38a5c22f1ef3e30d2642aa875620d60edeef36cef43c4739d86215ce816331" "checksum libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)" = "408014cace30ee0f767b1c4517980646a573ec61a57957aeeabcac8ac0a02e8d" "checksum libusb 0.3.0 (git+https://github.com/ethcore/libusb-rs)" = "" "checksum libusb-sys 0.2.3 (git+https://github.com/ethcore/libusb-sys)" = "" @@ -2730,13 +2734,13 @@ dependencies = [ "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2" "checksum mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e50bf542f81754ef69e5cea856946a3819f7c09ea97b4903c8bc8a89f74e7b6" "checksum miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d1f4d337a01c32e1f2122510fed46393d53ca35a7f429cb0450abaedfa3ed54" -"checksum mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)" = "" -"checksum mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a637d1ca14eacae06296a008fa7ad955347e34efcb5891cfd8ba05491a37907e" "checksum mio 0.6.0-dev (git+https://github.com/ethcore/mio?branch=timer-fix)" = "" "checksum mio 0.6.1 
(git+https://github.com/ethcore/mio)" = "" -"checksum mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "410a1a0ff76f5a226f1e4e3ff1756128e65cd30166e39c3892283e2ac09d5b67" +"checksum mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5b493dc9fd96bd2077f2117f178172b0765db4dfda3ea4d8000401e6d65d3e80" +"checksum mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)" = "" "checksum mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "78437f00d9615c366932cbfe79790b5c2945706ba67cf78378ffacc0069ed9de" -"checksum miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d5bfc6782530ac8ace97af10a540054a37126b63b0702ddaaa243b73b5745b9a" +"checksum miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3e690c5df6b2f60acd45d56378981e827ff8295562fc8d34f573deb267a59cd1" +"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum msdos_time 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c04b68cc63a8480fb2550343695f7be72effdec953a9d4508161c3e69041c7d8" "checksum multibase 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9c35dac080fd6e16a99924c8dfdef0af89d797dd851adab25feaffacf7850d6" "checksum multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "755d5a39bee3faaf649437e873beab334990221b2faf1f2e56ca10a9e4600235" @@ -2744,7 +2748,6 @@ dependencies = [ "checksum nanomsg-sys 0.5.0 (git+https://github.com/ethcore/nanomsg.rs.git)" = "" "checksum native-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa4e52995154bb6f0b41e4379a279482c9387c1632e3798ba4e511ef8c54ee09" "checksum net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "6a816012ca11cb47009693c1e0c6130e26d39e4d97ee2a13c50e868ec83e3204" -"checksum nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f05c2fc965fc1cd6b73fa57fa7b89f288178737f2f3ce9e63e4a6a141189000e" "checksum nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a7bb1da2be7da3cbffda73fc681d509ffd9e665af478d2bee1907cee0bc64b2" "checksum nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0d95c5fa8b641c10ad0b8887454ebaafa3c92b5cd5350f8fc693adafd178e7b" "checksum nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4d9a22dbcebdeef7bf275cbf444d6521d4e7a2fee187b72d80dba0817120dd8f" @@ -2766,6 +2769,7 @@ dependencies = [ "checksum order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" "checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7" "checksum parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98378dec0a185da2b7180308752f0bad73aaa949c3e0a3b0528d0e067945f7ab" +"checksum parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "" "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/ethcore/js-precompiled.git)" = "" "checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621" "checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068" @@ -2816,7 +2820,6 @@ dependencies = [ "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" "checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d" "checksum siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5c44e42fa187b5a8782489cf7740cc27c3125806be2bf33563cf5e02e9533fcd" -"checksum slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d807fd58c4181bbabed77cb3b891ba9748241a552bcc5be698faaebefc54f46e" "checksum slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)" = "" "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" @@ -2839,7 +2842,10 @@ dependencies = [ "checksum thread_local 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0694f51610ef7cfac7a1b81de7f1602ee5356e76541bcd62c40e71933338cab1" "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" -"checksum tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52416b3e937abac22a543a7f1c66bd37feb60137ff1ab42390fa02df85347e58" +"checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b" +"checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" +"checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "" +"checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" diff --git a/Cargo.toml b/Cargo.toml index 2a01d2c8e..8725aaa78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ app_dirs = "1.1.1" fdlimit = "0.1" hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index ce8f495e6..e07bd4535 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -117,7 +117,7 @@ impl RestApiRouter { impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { - self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains); + self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); if let Method::Options = *request.method() { self.handler = response::empty(); diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 0c95051e4..e7f5eef99 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -49,8 +49,10 @@ struct RpcEndpoint> { #[derive(Default)] struct NoopMiddleware; impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, _request: &hyper::server::Request) -> http::RequestMiddlewareAction { - http::RequestMiddlewareAction::Proceed + fn on_request(&self, request: &hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: request.headers().get::().is_none(), + } } } diff --git a/ethcore/light/src/on_demand/mod.rs 
b/ethcore/light/src/on_demand/mod.rs index 25cde402b..585985f05 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -74,6 +74,8 @@ pub struct OnDemand { orphaned_requests: RwLock>, } +const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; + impl OnDemand { /// Create a new `OnDemand` service with the given cache. pub fn new(cache: Arc>) -> Self { @@ -95,7 +97,7 @@ impl OnDemand { }; match cached { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_number(ctx, req, ChtProofSender::Header(sender)), } receiver @@ -111,7 +113,7 @@ impl OnDemand { }; match cached { - Some(score) => sender.complete(score), + Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_number(ctx, req, ChtProofSender::ChainScore(sender)), } @@ -132,7 +134,7 @@ impl OnDemand { }; match cached { - (Some(hdr), Some(score)) => sender.complete((hdr, score)), + (Some(hdr), Some(score)) => sender.send((hdr, score)).expect(RECEIVER_IN_SCOPE), _ => self.dispatch_header_by_number(ctx, req, ChtProofSender::Both(sender)), } @@ -183,7 +185,7 @@ impl OnDemand { pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_hash(ctx, req, sender), } receiver @@ -241,7 +243,7 @@ impl OnDemand { stream.begin_list(0); stream.begin_list(0); - sender.complete(encoded::Block::new(stream.out())) + sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE) } else { match self.cache.lock().block_body(&req.hash) { Some(body) => { @@ -293,10 +295,10 @@ impl OnDemand { // fast path for empty receipts. 
if req.0.receipts_root() == SHA3_NULL_RLP { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { match self.cache.lock().block_receipts(&req.0.hash()) { - Some(receipts) => sender.complete(receipts), + Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), None => self.dispatch_block_receipts(ctx, req, sender), } } @@ -381,7 +383,7 @@ impl OnDemand { // fast path for no code. if req.code_hash == ::util::sha3::SHA3_EMPTY { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { self.dispatch_code(ctx, req, sender); } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 078908db4..76e925a85 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -323,7 +323,7 @@ impl Spec { pub fn load(reader: R) -> Result where R: Read { match ethjson::spec::Spec::load(reader) { Ok(spec) => Ok(spec.into()), - _ => Err("Spec json is invalid".into()), + Err(e) => Err(format!("Spec json is invalid: {}", e)), } } diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 6ce5518c3..1443c8cf2 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } rlp = { path = "../util/rlp" } mime = "0.2" hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 287abb27c..3d79c00fb 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -72,17 +72,6 @@ impl IpfsHandler { client: client, } } - - fn is_origin_allowed(&self, origin_provided: bool) -> bool { - match (origin_provided, self.cors_header.as_ref()) { - // Request without Origin are always OK. 
- (false, _) => true, - // If there is a cors header to be returned it's ok. - (true, Some(_)) => true, - // If origin is provided and we won't return cors header it's bad. - (true, None) => false, - } - } } /// Implement Hyper's HTTP handler @@ -92,7 +81,6 @@ impl Handler for IpfsHandler { return Next::write(); } - self.cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); @@ -100,11 +88,13 @@ impl Handler for IpfsHandler { return Next::write(); } - if !self.is_origin_allowed(req.headers().get::().is_some()) { + let cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); + if cors_header == jsonrpc_http_server::CorsHeader::Invalid { self.out = Out::Bad("Disallowed Origin header"); return Next::write(); } + self.cors_header = cors_header.into(); let (path, query) = match *req.uri() { RequestUri::AbsolutePath { ref path, ref query } => (path, query.as_ref().map(AsRef::as_ref)), diff --git a/json/src/hash.rs b/json/src/hash.rs index ae6ba1a81..78fa77bd9 100644 --- a/json/src/hash.rs +++ b/json/src/hash.rs @@ -59,11 +59,11 @@ macro_rules! 
impl_hash { let value = match value.len() { 0 => $inner::from(0), 2 if value == "0x" => $inner::from(0), - _ if value.starts_with("0x") => $inner::from_str(&value[2..]).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ if value.starts_with("0x") => $inner::from_str(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, - _ => $inner::from_str(value).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ => $inner::from_str(value).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, }; diff --git a/json/src/uint.rs b/json/src/uint.rs index 281820d78..6b206b380 100644 --- a/json/src/uint.rs +++ b/json/src/uint.rs @@ -63,7 +63,7 @@ impl Visitor for UintVisitor { type Value = Uint; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex encoded uint") + write!(formatter, "a hex encoded or decimal uint") } fn visit_u64(self, value: u64) -> Result where E: Error { @@ -74,11 +74,11 @@ impl Visitor for UintVisitor { let value = match value.len() { 0 => U256::from(0), 2 if value.starts_with("0x") => U256::from(0), - _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, - _ => U256::from_dec_str(value).map_err(|_| { - Error::custom(format!("Invalid decimal value {}.", value).as_str()) + _ => U256::from_dec_str(value).map_err(|e| { + Error::custom(format!("Invalid decimal value {}: {:?}", value, e).as_str()) })? 
}; diff --git a/parity/rpc.rs b/parity/rpc.rs index b0af8aa0b..a435f24db 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -20,7 +20,7 @@ use std::net::SocketAddr; use std::io; use dir::default_data_path; -use ethcore_rpc::{self as rpc, HttpServerError, IpcServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; +use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; use helpers::parity_ipc_path; use hyper; @@ -100,6 +100,15 @@ impl rpc::HttpMetaExtractor for RpcExtractor { } } +impl rpc::IpcMetaExtractor for RpcExtractor { + fn extract(&self, _req: &rpc::IpcRequestContext) -> Metadata { + let mut metadata = Metadata::default(); + // TODO [ToDr] Extract proper session id when it's available in context. + metadata.origin = Origin::Ipc(1.into()); + metadata + } +} + pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); @@ -136,17 +145,16 @@ pub fn setup_http_rpc_server( } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result>, String> { +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); } Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result, String> { +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { let handler = setup_apis(apis, dependencies); let remote = dependencies.remote.clone(); - match rpc::start_ipc(addr, handler, remote) { - Err(IpcServerError::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), - Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), + match rpc::start_ipc(addr, handler, remote, RpcExtractor) { + Err(io_error) => Err(format!("RPC io error: {}", io_error)), Ok(server) => Ok(server) } } diff --git a/rpc/src/lib.rs 
b/rpc/src/lib.rs index 7a9ee5a22..9dc2f6f29 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -63,7 +63,7 @@ extern crate ethcore_devtools as devtools; pub mod v1; -pub use ipc::{Server as IpcServer, Error as IpcServerError}; +pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; @@ -95,16 +95,19 @@ pub fn start_http( } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. -pub fn start_ipc( +pub fn start_ipc( addr: &str, handler: H, remote: tokio_core::reactor::Remote, -) -> Result, ipc::Error> where + extractor: T, +) -> ::std::io::Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, H: Into>, + T: IpcMetaExtractor, { - let server = ipc::Server::with_remote(addr, handler, ipc::UninitializedRemote::Shared(remote))?; - server.run_async()?; - Ok(server) + ipc::ServerBuilder::new(handler) + .event_loop_remote(remote) + .session_metadata_extractor(extractor) + .start(addr) } diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index d737131a6..c322588c5 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -200,11 +200,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response { + ignore_error(match response { Ok(RpcConfirmationResponse::Signature(sig)) => ready.complete(Ok(sig)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) @@ -222,11 +222,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response 
{ + ignore_error(match response { Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.complete(Ok(hash)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) @@ -244,11 +244,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response { + ignore_error(match response { Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.complete(Ok(tx)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index c96a3433b..791042fe0 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -124,13 +124,16 @@ macro_rules! impl_hash { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, padded, hex-encoded hash of type {}", stringify!($name)) + write!(formatter, "a 0x-prefixed, padded, hex-encoded hash with length {}", $size * 2) } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { + if value.len() < 2 || &value[0..2] != "0x" { + return Err(E::custom("Expected hex-encoded hash with 0x prefix.")); + } if value.len() != 2 + $size * 2 { - return Err(E::custom("Invalid length.")); + return Err(E::invalid_length(value.len() - 2, &self)); } match value[2..].from_hex() { @@ -139,7 +142,7 @@ macro_rules! 
impl_hash { result.copy_from_slice(v); Ok($name(result)) }, - _ => Err(E::custom("Invalid hex value.")) + Err(e) => Err(E::custom(format!("Invalid hex value: {:?}", e))), } } diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index ba3b83fa7..e646ec6c2 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -74,20 +74,20 @@ macro_rules! impl_uint { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, hex-encoded number of type {}", stringify!($name)) + write!(formatter, "a 0x-prefixed, hex-encoded number of length {}", $size*16) } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { + if value.len() < 2 || &value[0..2] != "0x" { + return Err(E::custom("Use hex-encoded numbers with 0x prefix.")) + } + // 0x + len - if value.len() > 2 + $size * 16 || value.len() < 2 { - return Err(E::custom("Invalid length.")); + if value.len() > 2 + $size * 16 { + return Err(E::invalid_length(value.len() - 2, &self)); } - if &value[0..2] != "0x" { - return Err(E::custom("Use hex encoded numbers with 0x prefix.")) - } - - $other::from_str(&value[2..]).map($name).map_err(|_| E::custom("Invalid hex value.")) + $other::from_str(&value[2..]).map($name).map_err(|e| E::custom(&format!("Invalid hex value: {:?}", e))) } fn visit_string(self, value: String) -> Result where E: serde::de::Error { diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index 77f9c3edf..a70816f9e 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -14,7 +14,7 @@ serde = "0.9" serde_json = "0.9" tempdir = "0.3.5" url = "1.2.0" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } ethcore-rpc = { path = "../rpc" } ethcore-signer = { path = "../signer" } diff --git 
a/stratum/Cargo.toml b/stratum/Cargo.toml index 1b309985a..201792340 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -11,10 +11,9 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -mio = { git = "https://github.com/ethcore/mio", branch = "v0.5.x" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ethcore-util = { path = "../util" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" diff --git a/util/reactor/src/lib.rs b/util/reactor/src/lib.rs index c1d7f8631..e5f04d652 100644 --- a/util/reactor/src/lib.rs +++ b/util/reactor/src/lib.rs @@ -190,7 +190,7 @@ impl From for EventLoopHandle { impl Drop for EventLoopHandle { fn drop(&mut self) { - self.close.take().map(|v| v.complete(())); + self.close.take().map(|v| v.send(())); } } @@ -203,7 +203,8 @@ impl EventLoopHandle { /// Finishes this event loop. pub fn close(mut self) { - self.close.take() - .expect("Close is taken only in `close` and `drop`. `close` is consuming; qed").complete(()) + let _ = self.close.take() + .expect("Close is taken only in `close` and `drop`. 
`close` is consuming; qed") + .send(()); } } From 579cff478dbb293fb39ef429dc3c7097e9206fc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 16 Mar 2017 15:43:31 +0100 Subject: [PATCH 35/91] Fixing deprecated methods of tokio_core --- Cargo.lock | 57 ++++++++++++++++++------- dapps/Cargo.toml | 1 - dapps/src/lib.rs | 2 +- dapps/src/rpc.rs | 23 +++++----- dapps/src/tests/helpers/fetch.rs | 2 +- dapps/src/tests/rpc.rs | 5 +-- ethcore/Cargo.toml | 5 +-- ethcore/light/src/on_demand/mod.rs | 3 ++ rpc/src/v1/helpers/mod.rs | 1 + rpc/src/v1/helpers/oneshot.rs | 67 ++++++++++++++++++++++++++++++ rpc/src/v1/impls/signing.rs | 57 +++++++++++++------------ rpc/src/v1/tests/helpers/fetch.rs | 2 +- 12 files changed, 158 insertions(+), 67 deletions(-) create mode 100644 rpc/src/v1/helpers/oneshot.rs diff --git a/Cargo.lock b/Cargo.lock index 8be5bd809..7ffc74576 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -409,7 +409,7 @@ dependencies = [ "ethstore 0.1.0", "evmjit 1.7.0", "hardware-wallet 1.7.0", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -455,7 +455,6 @@ dependencies = [ "ethcore-util 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -661,7 +660,7 @@ dependencies = [ "ethcore-util 1.7.0", "ethcrypto 0.1.0", "ethkey 0.2.0", - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1001,7 +1000,26 @@ dependencies = [ [[package]] name = "hyper" -version = "0.10.4" +version = "0.10.0-a.0" +source = "git+https://github.com/paritytech/hyper#453c683b52208fefc32d29e4ac7c863439b2321f" +dependencies = [ + "cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rotor 0.6.3 (git+https://github.com/ethcore/rotor)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1012,7 +1030,7 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "traitobject 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1024,7 +1042,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1100,7 +1118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1112,9 +1130,9 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1125,7 +1143,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = 
"git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1137,7 +1155,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1147,7 +1165,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1157,7 +1175,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1168,7 +1186,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 
(git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1951,7 +1969,7 @@ name = "reqwest" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "hyper-native-tls 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2479,6 +2497,11 @@ name = "traitobject" version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "traitobject" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "transient-hashmap" version = "0.1.0" @@ -2700,7 +2723,8 @@ dependencies = [ "checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" "checksum hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)" = "" -"checksum hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)" = "220407e5a263f110ec30a071787c9535918fdfc97def5680c90013c3f30c38c1" +"checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" +"checksum hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)" = "43a15e3273b2133aaac0150478ab443fb89f15c3de41d8d93d8f3bb14bf560f6" "checksum hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9bf64f730d6ee4b0528a5f0a316363da9d8104318731509d4ccc86248f82b3" "checksum hyper-native-tls 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"afe68f772f0497a7205e751626bb8e1718568b58534b6108c73a74ef80483409" "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" @@ -2852,6 +2876,7 @@ dependencies = [ "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" "checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616" +"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" "checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0" "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" "checksum unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "13a5906ca2b98c799f4b1ab4557b76367ebd6ae5ef14930ec841c74aed5f3764" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 5bbad53c7..95e1f3f56 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -25,7 +25,6 @@ unicase = "1.3" url = "1.0" zip = { version = "0.1", default-features = false } -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } # TODO [ToDr] Temporary solution, server should be merged with RPC. 
diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index eca6fd991..252e1c3bb 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -21,7 +21,6 @@ extern crate base32; extern crate futures; -extern crate hyper; extern crate linked_hash_map; extern crate mime_guess; extern crate rand; @@ -78,6 +77,7 @@ use std::collections::HashMap; use jsonrpc_core::{Middleware, MetaIoHandler}; use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; +pub use jsonrpc_http_server::hyper; use ethcore_rpc::Metadata; use fetch::{Fetch, Client as FetchClient}; diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index e7f5eef99..6ddb31db0 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -46,15 +46,6 @@ struct RpcEndpoint> { allowed_hosts: Option>, } -#[derive(Default)] -struct NoopMiddleware; -impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &hyper::server::Request) -> http::RequestMiddlewareAction { - http::RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: request.headers().get::().is_none(), - } - } -} impl> Endpoint for RpcEndpoint { fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { @@ -72,10 +63,20 @@ impl> Endpoint for RpcEndpoint { } } +#[derive(Default)] +struct NoopMiddleware; +impl http::RequestMiddleware for NoopMiddleware { + fn on_request(&self, request: &http::hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: request.headers().get::().is_none(), + } + } +} + struct MetadataExtractor; impl HttpMetaExtractor for MetadataExtractor { - fn read_metadata(&self, request: &hyper::server::Request) -> Metadata { - let dapp_id = request.headers().get::() + fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { + let dapp_id = request.headers().get::() .map(|origin| format!("{}://{}", origin.scheme, 
origin.host)) .or_else(|| { // fallback to custom header, but only if origin is null diff --git a/dapps/src/tests/helpers/fetch.rs b/dapps/src/tests/helpers/fetch.rs index fcfd4db9c..e6e875c51 100644 --- a/dapps/src/tests/helpers/fetch.rs +++ b/dapps/src/tests/helpers/fetch.rs @@ -114,7 +114,7 @@ impl Fetch for FakeFetch { let data = response.lock().take().unwrap_or(b"Some content"); let cursor = io::Cursor::new(data); - tx.complete(fetch::Response::from_reader(cursor)); + tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); rx.map_err(|_| fetch::Error::Aborted).boxed() diff --git a/dapps/src/tests/rpc.rs b/dapps/src/tests/rpc.rs index 0dbba384c..2cc4ccb24 100644 --- a/dapps/src/tests/rpc.rs +++ b/dapps/src/tests/rpc.rs @@ -55,8 +55,8 @@ fn should_extract_metadata() { // given let mut io = MetaIoHandler::default(); io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into())); - assert_eq!(meta.dapp_id(), "https://parity.io/".into()); + assert_eq!(meta.origin, Origin::Dapps("".into())); + assert_eq!(meta.dapp_id(), "".into()); future::ok(Value::String("Hello World!".into())).boxed() }); let server = serve_with_rpc(io); @@ -68,7 +68,6 @@ fn should_extract_metadata() { POST /rpc/ HTTP/1.1\r\n\ Host: 127.0.0.1:8080\r\n\ Connection: close\r\n\ - Origin: https://parity.io/\r\n\ X-Parity-Origin: https://this.should.be.ignored\r\n\ Content-Type: application/json\r\n\ Content-Length: {}\r\n\ diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 64010fadf..ae029206a 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -44,10 +44,7 @@ ethcore-stratum = { path = "../stratum" } ethcore-bloom-journal = { path = "../util/bloom" } hardware-wallet = { path = "../hw" } stats = { path = "../util/stats" } - -[dependencies.hyper] -git = "https://github.com/ethcore/hyper" -default-features = false +hyper = { git = "https://github.com/paritytech/hyper", default-features = false } [features] jit = 
["evmjit"] diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 585985f05..4941552fc 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -18,6 +18,9 @@ //! The request service is implemented using Futures. Higher level request handlers //! will take the raw data received here and extract meaningful results from it. +// TODO [ToDr] Suppressing deprecation warnings. Rob will fix the API anyway. +#![allow(deprecated)] + use std::collections::HashMap; use std::sync::Arc; diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index 1d6bd14f3..ff1bc9dbe 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -20,6 +20,7 @@ pub mod errors; pub mod block_import; pub mod dispatch; pub mod informant; +pub mod oneshot; mod network_settings; mod poll_manager; diff --git a/rpc/src/v1/helpers/oneshot.rs b/rpc/src/v1/helpers/oneshot.rs new file mode 100644 index 000000000..c128ccf55 --- /dev/null +++ b/rpc/src/v1/helpers/oneshot.rs @@ -0,0 +1,67 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use jsonrpc_core::Error; +use futures::{self, Future}; +use futures::sync::oneshot; +use v1::helpers::errors; + +pub type Res = Result; + +pub struct Sender { + sender: oneshot::Sender>, +} + +impl Sender { + pub fn send(self, data: Res) { + let res = self.sender.send(data); + if let Err(_) = res { + debug!(target: "rpc", "Responding to a no longer active request."); + } + } +} + +pub struct Receiver { + receiver: oneshot::Receiver>, +} + +impl Future for Receiver { + type Item = T; + type Error = Error; + + fn poll(&mut self) -> futures::Poll { + let res = self.receiver.poll(); + match res { + Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), + Ok(futures::Async::Ready(Ok(res))) => Ok(futures::Async::Ready(res)), + Ok(futures::Async::Ready(Err(err))) => Err(err), + Err(e) => { + debug!(target: "rpc", "Responding to a canceled request: {:?}", e); + Err(errors::internal("Request was canceled by client.", e)) + }, + } + } +} + +pub fn oneshot() -> (Sender, Receiver) { + let (tx, rx) = futures::oneshot(); + + (Sender { + sender: tx, + }, Receiver { + receiver: rx, + }) +} diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index c322588c5..f7a66f082 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -22,10 +22,10 @@ use util::{U256, Mutex}; use ethcore::account_provider::AccountProvider; -use futures::{self, future, BoxFuture, Future}; +use futures::{future, BoxFuture, Future}; use jsonrpc_core::Error; use v1::helpers::{ - errors, + errors, oneshot, DefaultAccount, SigningQueue, ConfirmationPromise, ConfirmationResult, SignerService }; @@ -167,21 +167,20 @@ impl ParitySigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); // when dispatch is complete res.then(move |res| { // register callback via the oneshot sender. 
handle_dispatch(res, move |response| { match response { - Ok(RpcConfirmationResponse::Decrypt(data)) => ready.complete(Ok(data)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), + Ok(RpcConfirmationResponse::Decrypt(data)) => ready.send(Ok(data)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), } }); - // and wait for that to resolve. - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } } @@ -196,18 +195,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::Signature(sig)) => ready.complete(Ok(sig)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } @@ -218,18 +217,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.complete(Ok(hash)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is 
never dropped nor canceled."))) + p }).boxed() } @@ -240,18 +239,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.complete(Ok(tx)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } } diff --git a/rpc/src/v1/tests/helpers/fetch.rs b/rpc/src/v1/tests/helpers/fetch.rs index 58ac96bcb..236dae91b 100644 --- a/rpc/src/v1/tests/helpers/fetch.rs +++ b/rpc/src/v1/tests/helpers/fetch.rs @@ -35,7 +35,7 @@ impl Fetch for TestFetch { let (tx, rx) = futures::oneshot(); thread::spawn(move || { let cursor = io::Cursor::new(b"Some content"); - tx.complete(fetch::Response::from_reader(cursor)); + tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); rx.map_err(|_| fetch::Error::Aborted).boxed() From cbb9314531be7b2ea2c2deb41f9ffafd83da19ca Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:23:59 +0100 Subject: [PATCH 36/91] use PIP messages in on_demand, old API --- Cargo.lock | 35 +- ethcore/light/src/lib.rs | 2 +- ethcore/light/src/on_demand/mod.rs | 610 ++++++++----------------- ethcore/light/src/on_demand/request.rs | 61 +-- ethcore/light/src/types/request/mod.rs | 4 +- 5 files changed, 227 insertions(+), 485 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6924cfe00..72d0b7778 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -445,7 +445,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -554,7 +554,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-network 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -620,7 +620,7 @@ dependencies = [ "ethstore 0.1.0", "ethsync 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -686,7 +686,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -849,7 +849,7 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 
0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -868,11 +868,8 @@ dependencies = [ [[package]] name = "futures" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "futures-cpupool" @@ -880,7 +877,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1086,7 +1083,7 @@ name = "jsonrpc-core" version = "6.0.0" source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1623,7 +1620,7 @@ dependencies = [ "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1665,7 
+1662,7 @@ dependencies = [ name = "parity-reactor" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1676,7 +1673,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-signer 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1994,7 +1991,7 @@ dependencies = [ "ethcore-bigint 0.1.2", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc-client 1.4.0", "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2345,7 +2342,7 @@ name = "tokio-core" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2357,7 +2354,7 @@ name = "tokio-proto" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2373,7 +2370,7 @@ name = "tokio-service" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2606,7 +2603,7 @@ dependencies = [ "checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c1913eb7083840b1bbcbf9631b7fda55eaf35fe7ead13cca034e8946f9e2bc41" +"checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" "checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82" "checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 81a974192..ada58d8de 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -35,7 +35,7 @@ pub mod client; pub mod cht; pub mod net; -//pub mod 
on_demand; +pub mod on_demand; pub mod transaction_queue; pub mod cache; diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 25cde402b..df8a6c6a9 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -31,12 +31,12 @@ use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, DBValue, RwLock, Mutex, U256}; +use util::{Bytes, DBValue, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; -use types::les_request::{self as les_request, Request as LesRequest}; +use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; pub mod request; @@ -46,24 +46,85 @@ struct Peer { capabilities: Capabilities, } +impl Peer { + // Whether a given peer can handle a specific request. + fn can_handle(&self, pending: &Pending) -> bool { + match *pending { + Pending::HeaderProof(ref req, _) => + self.capabilities.serve_headers && self.status.head_num > req.num(), + Pending::HeaderByHash(ref req, _) => self.capabilities.serve_headers, + Pending::Block(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::BlockReceipts(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + Pending::Account(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::Code(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + Pending::TxProof(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + } + } +} + // Which portions of a CHT proof should be sent. 
enum ChtProofSender { - Both(Sender<(encoded::Header, U256)>), - Header(Sender), + Both(Sender<(H256, U256)>), + Hash(Sender), ChainScore(Sender), } // Attempted request info and sender to put received value. enum Pending { - HeaderByNumber(request::HeaderByNumber, ChtProofSender), + HeaderProof(request::HeaderProof, ChtProofSender), HeaderByHash(request::HeaderByHash, Sender), Block(request::Body, Sender), BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender), + Account(request::Account, Sender>), Code(request::Code, Sender), TxProof(request::TransactionProof, Sender>), } +impl Pending { + // Create a network request. + fn make_request(&self) -> NetworkRequest { + match *self { + Pending::HeaderByHash(ref req, _) => NetworkRequest::Headers(basic_request::IncompleteHeadersRequest { + start: basic_request::HashOrNumber::Hash(req.0).into(), + skip: 0, + max: 1, + reverse: false, + }), + Pending::HeaderProof(ref req, _) => NetworkRequest::HeaderProof(basic_request::IncompleteHeaderProofRequest { + num: req.num().into(), + }), + Pending::Block(ref req, _) => NetworkRequest::Body(basic_request::IncompleteBodyRequest { + hash: req.hash.into(), + }), + Pending::BlockReceipts(ref req, _) => NetworkRequest::Receipts(basic_request::IncompleteReceiptsRequest { + hash: req.0.hash().into(), + }), + Pending::Account(ref req, _) => NetworkRequest::Account(basic_request::IncompleteAccountRequest { + block_hash: req.header.hash().into(), + address_hash: ::util::Hashable::sha3(&req.address).into(), + }), + Pending::Code(ref req, _) => NetworkRequest::Code(basic_request::IncompleteCodeRequest { + block_hash: req.block_id.0.into(), + code_hash: req.code_hash.into(), + }), + Pending::TxProof(ref req, _) => NetworkRequest::Execution(basic_request::IncompleteExecutionRequest { + block_hash: req.header.hash().into(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: 
req.tx.data.clone(), + }), + } + } +} + /// On demand request service. See module docs for more details. /// Accumulates info about all peers' capabilities and dispatches /// requests to them accordingly. @@ -85,25 +146,25 @@ impl OnDemand { } } - /// Request a header by block number and CHT root hash. - /// Returns the header. - pub fn header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + /// Request a header's hash by block number and CHT root hash. + /// Returns the hash. + pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); - cache.block_hash(&req.num()).and_then(|hash| cache.block_header(&hash)) + cache.block_hash(&req.num()) }; match cached { - Some(hdr) => sender.complete(hdr), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::Header(sender)), + Some(hash) => sender.complete(hash), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), } receiver } /// Request a canonical block's chain score. /// Returns the chain score. - pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); @@ -112,71 +173,33 @@ impl OnDemand { match cached { Some(score) => sender.complete(score), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::ChainScore(sender)), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), } receiver } - /// Request a canonical block's chain score. - /// Returns the header and chain score. 
- pub fn header_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver<(encoded::Header, U256)> { + /// Request a canonical block's hash and chain score by number. + /// Returns the hash and chain score. + pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); let hash = cache.block_hash(&req.num()); ( - hash.clone().and_then(|hash| cache.block_header(&hash)), + hash.clone(), hash.and_then(|hash| cache.chain_score(&hash)), ) }; match cached { - (Some(hdr), Some(score)) => sender.complete((hdr, score)), - _ => self.dispatch_header_by_number(ctx, req, ChtProofSender::Both(sender)), + (Some(hash), Some(score)) => sender.complete((hash, score)), + _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), } receiver } - // dispatch the request, completing the request if no peers available. - fn dispatch_header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber, sender: ChtProofSender) { - let num = req.num(); - let cht_num = req.cht_num(); - - let les_req = LesRequest::HeaderProofs(les_request::HeaderProofs { - requests: vec![les_request::HeaderProof { - cht_number: cht_num, - block_number: num, - from_level: 0, - }], - }); - - let pending = Pending::HeaderByNumber(req, sender); - - // we're looking for a peer with serveHeaders who's far enough along in the - // chain. 
- for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_headers && peer.status.head_num >= num { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - }, - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a header by hash. This is less accurate than by-number because we don't know /// where in the chain this header lies, and therefore can't find a peer who is supposed to have /// it as easily. @@ -184,50 +207,11 @@ impl OnDemand { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { Some(hdr) => sender.complete(hdr), - None => self.dispatch_header_by_hash(ctx, req, sender), + None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } receiver } - fn dispatch_header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash, sender: Sender) { - let les_req = LesRequest::Headers(les_request::Headers { - start: req.0.into(), - max: 1, - skip: 0, - reverse: false, - }); - - // all we've got is a hash, so we'll just guess at peers who might have - // it randomly. 
- let mut potential_peers = self.peers.read().iter() - .filter(|&(_, peer)| peer.capabilities.serve_headers) - .map(|(id, _)| *id) - .collect::>(); - - let mut rng = ::rand::thread_rng(); - ::rand::Rng::shuffle(&mut rng, &mut potential_peers); - - let pending = Pending::HeaderByHash(req, sender); - - for id in potential_peers { - match ctx.request_from(id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a block, given its header. Block bodies are requestable by hash only, /// and the header is required anyway to verify and complete the block body /// -- this just doesn't obscure the network query. @@ -251,41 +235,12 @@ impl OnDemand { sender.complete(encoded::Block::new(stream.out())); } - None => self.dispatch_block(ctx, req, sender), + None => self.dispatch(ctx, Pending::Block(req, sender)), } } receiver } - fn dispatch_block(&self, ctx: &BasicContext, req: request::Body, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::Bodies(les_request::Bodies { - block_hashes: vec![req.hash], - }); - let pending = Pending::Block(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer 
for request"); - self.orphaned_requests.write().push(pending) - } - /// Request the receipts for a block. The header serves two purposes: /// provide the block hash to fetch receipts for, and for verification of the receipts root. pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { @@ -297,84 +252,21 @@ impl OnDemand { } else { match self.cache.lock().block_receipts(&req.0.hash()) { Some(receipts) => sender.complete(receipts), - None => self.dispatch_block_receipts(ctx, req, sender), + None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } } receiver } - fn dispatch_block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts, sender: Sender>) { - let num = req.0.number(); - let les_req = LesRequest::Receipts(les_request::Receipts { - block_hashes: vec![req.0.hash()], - }); - let pending = Pending::BlockReceipts(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request an account by address and block header -- which gives a hash to query and a state root /// to verify against. 
- pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_account(ctx, req, sender); + self.dispatch(ctx, Pending::Account(req, sender)); receiver } - fn dispatch_account(&self, ctx: &BasicContext, req: request::Account, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::StateProofs(les_request::StateProofs { - requests: vec![les_request::StateProof { - block: req.header.hash(), - key1: ::util::Hashable::sha3(&req.address), - key2: None, - from_level: 0, - }], - }); - let pending = Pending::Account(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request code by address, known code hash, and block header. 
pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { let (sender, receiver) = oneshot::channel(); @@ -383,88 +275,50 @@ impl OnDemand { if req.code_hash == ::util::sha3::SHA3_EMPTY { sender.complete(Vec::new()) } else { - self.dispatch_code(ctx, req, sender); + self.dispatch(ctx, Pending::Code(req, sender)); } receiver } - fn dispatch_code(&self, ctx: &BasicContext, req: request::Code, sender: Sender) { - let num = req.block_id.1; - let les_req = LesRequest::Codes(les_request::ContractCodes { - code_requests: vec![les_request::ContractCode { - block_hash: req.block_id.0, - account_key: ::util::Hashable::sha3(&req.address), - }] - }); - let pending = Pending::Code(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request proof-of-execution for a transaction. 
pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_transaction_proof(ctx, req, sender); + self.dispatch(ctx, Pending::TxProof(req, sender)); receiver } - fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender>) { - let num = req.header.number(); - let les_req = LesRequest::TransactionProof(les_request::TransactionProof { - at: req.header.hash(), - from: req.tx.sender(), - gas: req.tx.gas, - gas_price: req.tx.gas_price, - action: req.tx.action.clone(), - value: req.tx.value, - data: req.tx.data.clone(), - }); - let pending = Pending::TxProof(req, sender); + // dispatch the request, with a "suitability" function to filter acceptable peers. + fn dispatch(&self, ctx: &BasicContext, pending: Pending) { + let mut builder = basic_request::RequestBuilder::default(); + builder.push(pending.make_request()) + .expect("make_request always returns fully complete request; qed"); + + let complete = builder.build(); - // we're looking for a peer with serveStateSince(num) for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), + if !peer.can_handle(&pending) { continue } + match ctx.request_from(*id, complete.clone()) { + Ok(req_id) => { + trace!(target: "on_demand", "Assigning request to peer {}", id); + self.pending_requests.write().insert( + req_id, + pending, + ); + return } + Err(e) => + trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } } trace!(target: "on_demand", "No suitable peer for request"); - 
self.orphaned_requests.write().push(pending) + self.orphaned_requests.write().push(pending); } + // dispatch orphaned requests, and discard those for which the corresponding // receiver has been dropped. fn dispatch_orphaned(&self, ctx: &BasicContext) { @@ -494,30 +348,22 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); - for orphaned in to_dispatch { - match orphaned { - Pending::HeaderByNumber(req, mut sender) => { - let hangup = match sender { + for mut orphaned in to_dispatch { + let hung_up = match orphaned { + Pending::HeaderProof(_, ref mut sender) => match *sender { ChtProofSender::Both(ref mut s) => check_hangup(s), - ChtProofSender::Header(ref mut s) => check_hangup(s), + ChtProofSender::Hash(ref mut s) => check_hangup(s), ChtProofSender::ChainScore(ref mut s) => check_hangup(s), - }; + }, + Pending::HeaderByHash(_, ref mut sender) => check_hangup(sender), + Pending::Block(_, ref mut sender) => check_hangup(sender), + Pending::BlockReceipts(_, ref mut sender) => check_hangup(sender), + Pending::Account(_, ref mut sender) => check_hangup(sender), + Pending::Code(_, ref mut sender) => check_hangup(sender), + Pending::TxProof(_, ref mut sender) => check_hangup(sender), + }; - if !hangup { self.dispatch_header_by_number(ctx, req, sender) } - } - Pending::HeaderByHash(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_header_by_hash(ctx, req, sender) }, - Pending::Block(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block(ctx, req, sender) }, - Pending::BlockReceipts(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block_receipts(ctx, req, sender) }, - Pending::Account(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) }, - Pending::Code(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) }, - Pending::TxProof(req, mut sender) => - if !check_hangup(&mut sender) { 
self.dispatch_transaction_proof(ctx, req, sender) } - } + if !hung_up { self.dispatch(ctx, orphaned) } } } } @@ -555,218 +401,126 @@ impl Handler for OnDemand { self.dispatch_orphaned(ctx.as_basic()); } - fn on_header_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[(Bytes, Vec)]) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { let peer = ctx.peer(); let req = match self.pending_requests.write().remove(&req_id) { Some(req) => req, None => return, }; + let response = match responses.get(0) { + Some(response) => response, + None => { + trace!(target: "on_demand", "Ignoring empty response for request {}", req_id); + self.dispatch(ctx.as_basic(), req); + return; + } + }; + + // handle the response appropriately for the request. + // all branches which do not return early lead to disabling of the peer + // due to misbehavior. match req { - Pending::HeaderByNumber(req, sender) => { - if let Some(&(ref header, ref proof)) = proofs.get(0) { - match req.check_response(header, proof) { - Ok((header, score)) => { + Pending::HeaderProof(req, sender) => { + if let NetworkResponse::HeaderProof(ref response) = *response { + match req.check_response(&response.proof) { + Ok((hash, score)) => { let mut cache = self.cache.lock(); - let hash = header.hash(); - cache.insert_block_header(hash, header.clone()); - cache.insert_block_hash(header.number(), hash); + cache.insert_block_hash(req.num(), hash); cache.insert_chain_score(hash, score); match sender { - ChtProofSender::Both(sender) => sender.complete((header, score)), - ChtProofSender::Header(sender) => sender.complete(header), + ChtProofSender::Both(sender) => sender.complete((hash, score)), + ChtProofSender::Hash(sender) => sender.complete(hash), ChtProofSender::ChainScore(sender) => sender.complete(score), } - return } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling 
response for header request: {:?}", e), } } - - self.dispatch_header_by_number(ctx.as_basic(), req, sender); } - _ => panic!("Only header by number request fetches header proofs; qed"), - } - } - - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::HeaderByHash(req, sender) => { - if let Some(ref header) = headers.get(0) { - match req.check_response(header) { - Ok(header) => { - self.cache.lock().insert_block_header(req.0, header.clone()); - sender.complete(header); - return - } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); + if let NetworkResponse::Headers(ref response) = *response { + if let Some(header) = response.headers.get(0) { + match req.check_response(header) { + Ok(header) => { + self.cache.lock().insert_block_header(req.0, header.clone()); + sender.complete(header); + return + } + Err(e) => warn!("Error handling response for header request: {:?}", e), } } } - - self.dispatch_header_by_hash(ctx.as_basic(), req, sender); } - _ => panic!("Only header by hash request fetches headers; qed"), - } - } - - fn on_block_bodies(&self, ctx: &EventContext, req_id: ReqId, bodies: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Block(req, sender) => { - if let Some(ref body) = bodies.get(0) { - match req.check_response(body) { + if let NetworkResponse::Body(ref response) = *response { + match req.check_response(&response.body) { Ok(block) => { - let body = encoded::Body::new(body.to_vec()); - self.cache.lock().insert_block_body(req.hash, body); + self.cache.lock().insert_block_body(req.hash, response.body.clone()); sender.complete(block); return } - Err(e) => { - warn!("Error handling response for block 
request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for block request: {:?}", e), } } - - self.dispatch_block(ctx.as_basic(), req, sender); } - _ => panic!("Only block request fetches bodies; qed"), - } - } - - fn on_receipts(&self, ctx: &EventContext, req_id: ReqId, receipts: &[Vec]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::BlockReceipts(req, sender) => { - if let Some(ref receipts) = receipts.get(0) { - match req.check_response(receipts) { + if let NetworkResponse::Receipts(ref response) = *response { + match req.check_response(&response.receipts) { Ok(receipts) => { let hash = req.0.hash(); self.cache.lock().insert_block_receipts(hash, receipts.clone()); sender.complete(receipts); return } - Err(e) => { - warn!("Error handling response for receipts request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for receipts request: {:?}", e), } } - - self.dispatch_block_receipts(ctx.as_basic(), req, sender); } - _ => panic!("Only receipts request fetches receipts; qed"), - } - } - - fn on_state_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[Vec]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Account(req, sender) => { - if let Some(ref proof) = proofs.get(0) { - match req.check_response(proof) { - Ok(proof) => { - sender.complete(proof); + if let NetworkResponse::Account(ref response) = *response { + match req.check_response(&response.proof) { + Ok(maybe_account) => { + // TODO: validate against request outputs. + // needs engine + env info as part of request. 
+ sender.complete(maybe_account); return } - Err(e) => { - warn!("Error handling response for state request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for state request: {:?}", e), } } - - self.dispatch_account(ctx.as_basic(), req, sender); } - _ => panic!("Only account request fetches state proof; qed"), - } - } - - fn on_code(&self, ctx: &EventContext, req_id: ReqId, codes: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Code(req, sender) => { - if let Some(code) = codes.get(0) { - match req.check_response(code.as_slice()) { + if let NetworkResponse::Code(ref response) = *response { + match req.check_response(response.code.as_slice()) { Ok(()) => { - sender.complete(code.clone()); + sender.complete(response.code.clone()); return } - Err(e) => { - warn!("Error handling response for code request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for code request: {:?}", e), } - - self.dispatch_code(ctx.as_basic(), req, sender); } } - _ => panic!("Only code request fetches code; qed"), - } - } - - fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::TxProof(req, sender) => { - match req.check_response(items) { - ProvedExecution::Complete(executed) => { - sender.complete(Ok(executed)); - return - } - ProvedExecution::Failed(err) => { - sender.complete(Err(err)); - return - } - ProvedExecution::BadProof => { - warn!("Error handling response for transaction proof request"); - ctx.disable_peer(peer); + if let NetworkResponse::Execution(ref response) = *response { + match req.check_response(&response.items) { + ProvedExecution::Complete(executed) => { + sender.complete(Ok(executed)); + 
return + } + ProvedExecution::Failed(err) => { + sender.complete(Err(err)); + return + } + ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), } } - - self.dispatch_transaction_proof(ctx.as_basic(), req, sender); } - _ => panic!("Only transaction proof request dispatches transaction proof requests; qed"), } + + ctx.disable_peer(peer); } fn tick(&self, ctx: &BasicContext) { diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 3a72db51d..4f028a71c 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -61,9 +61,9 @@ impl From> for Error { } } -/// Request for a header by number. +/// Request for header proof by number #[derive(Debug, Clone, PartialEq, Eq)] -pub struct HeaderByNumber { +pub struct HeaderProof { /// The header's number. num: u64, /// The cht number for the given block number. @@ -72,11 +72,11 @@ pub struct HeaderByNumber { cht_root: H256, } -impl HeaderByNumber { +impl HeaderProof { /// Construct a new header-by-number request. Fails if the given number is 0. /// Provide the expected CHT root to compare against. pub fn new(num: u64, cht_root: H256) -> Option { - ::cht::block_to_cht_number(num).map(|cht_num| HeaderByNumber { + ::cht::block_to_cht_number(num).map(|cht_num| HeaderProof { num: num, cht_num: cht_num, cht_root: cht_root, @@ -92,18 +92,11 @@ impl HeaderByNumber { /// Access the expected CHT root. pub fn cht_root(&self) -> H256 { self.cht_root } - /// Check a response with a header and cht proof. - pub fn check_response(&self, header: &[u8], proof: &[Bytes]) -> Result<(encoded::Header, U256), Error> { - let (expected_hash, td) = match ::cht::check_proof(proof, self.num, self.cht_root) { - Some((expected_hash, td)) => (expected_hash, td), - None => return Err(Error::BadProof), - }; - - // and compare the hash to the found header. 
- let found_hash = header.sha3(); - match expected_hash == found_hash { - true => Ok((encoded::Header::new(header.to_vec()), td)), - false => Err(Error::WrongHash(expected_hash, found_hash)), + /// Check a response with a CHT proof, get a hash and total difficulty back. + pub fn check_response(&self, proof: &[Bytes]) -> Result<(H256, U256), Error> { + match ::cht::check_proof(proof, self.num, self.cht_root) { + Some((expected_hash, td)) => Ok((expected_hash, td)), + None => Err(Error::BadProof), } } } @@ -114,10 +107,10 @@ pub struct HeaderByHash(pub H256); impl HeaderByHash { /// Check a response for the header. - pub fn check_response(&self, header: &[u8]) -> Result { + pub fn check_response(&self, header: &encoded::Header) -> Result { let hash = header.sha3(); match hash == self.0 { - true => Ok(encoded::Header::new(header.to_vec())), + true => Ok(header.clone()), false => Err(Error::WrongHash(self.0, hash)), } } @@ -143,16 +136,14 @@ impl Body { } /// Check a response for this block body. - pub fn check_response(&self, body: &[u8]) -> Result { - let body_view = UntrustedRlp::new(&body); - + pub fn check_response(&self, body: &encoded::Body) -> Result { // check the integrity of the the body against the header - let tx_root = ::util::triehash::ordered_trie_root(body_view.at(0)?.iter().map(|r| r.as_raw().to_vec())); + let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); if tx_root != self.header.transactions_root() { return Err(Error::WrongTrieRoot(self.header.transactions_root(), tx_root)); } - let uncles_hash = body_view.at(1)?.as_raw().sha3(); + let uncles_hash = body.rlp().at(1).as_raw().sha3(); if uncles_hash != self.header.uncles_hash() { return Err(Error::WrongHash(self.header.uncles_hash(), uncles_hash)); } @@ -160,7 +151,7 @@ impl Body { // concatenate the header and the body. 
let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(body, 2); + stream.append_raw(&body.rlp().as_raw(), 2); Ok(encoded::Block::new(stream.out())) } @@ -194,7 +185,7 @@ pub struct Account { impl Account { /// Check a response with an account against the stored header. - pub fn check_response(&self, proof: &[Bytes]) -> Result { + pub fn check_response(&self, proof: &[Bytes]) -> Result, Error> { let state_root = self.header.state_root(); let mut db = MemoryDB::new(); @@ -203,14 +194,14 @@ impl Account { match TrieDB::new(&db, &state_root).and_then(|t| t.get(&self.address.sha3()))? { Some(val) => { let rlp = UntrustedRlp::new(&val); - Ok(BasicAccount { + Ok(Some(BasicAccount { nonce: rlp.val_at(0)?, balance: rlp.val_at(1)?, storage_root: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, - }) + })) }, - None => Err(Error::BadProof) + None => Ok(None), } } } @@ -219,8 +210,6 @@ impl Account { pub struct Code { /// Block hash, number pair. pub block_id: (H256, u64), - /// Address requested. - pub address: Address, /// Account's code hash. 
pub code_hash: H256, } @@ -278,11 +267,11 @@ mod tests { #[test] fn no_invalid_header_by_number() { - assert!(HeaderByNumber::new(0, Default::default()).is_none()) + assert!(HeaderProof::new(0, Default::default()).is_none()) } #[test] - fn check_header_by_number() { + fn check_header_proof() { use ::cht; let test_client = TestBlockChainClient::new(); @@ -303,11 +292,11 @@ mod tests { }; let proof = cht.prove(10_000, 0).unwrap().unwrap(); - let req = HeaderByNumber::new(10_000, cht.root()).unwrap(); + let req = HeaderProof::new(10_000, cht.root()).unwrap(); let raw_header = test_client.block_header(::ethcore::ids::BlockId::Number(10_000)).unwrap(); - assert!(req.check_response(&raw_header.into_inner(), &proof[..]).is_ok()); + assert!(req.check_response(&proof[..]).is_ok()); } #[test] @@ -334,7 +323,8 @@ mod tests { hash: header.hash(), }; - assert!(req.check_response(&*body_stream.drain()).is_ok()) + let response = encoded::Body::new(body_stream.drain()); + assert!(req.check_response(&response).is_ok()) } #[test] @@ -412,7 +402,6 @@ mod tests { let code = vec![1u8; 256]; let req = Code { block_id: (Default::default(), 2), - address: Default::default(), code_hash: ::util::Hashable::sha3(&code), }; diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 1ebe1c75b..a3880da44 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -202,6 +202,8 @@ impl Encodable for HashOrNumber { } /// All request types, as they're sent over the network. +/// They may be incomplete, with back-references to outputs +/// of prior requests. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { /// A request for block headers. @@ -223,7 +225,7 @@ pub enum Request { Execution(IncompleteExecutionRequest), } -/// All request types, as they're sent over the network. +/// All request types, in an answerable state. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum CompleteRequest { /// A request for block headers. From 04f106aad8418a4a769a8e90fcbe99998cf8c543 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:29:06 +0100 Subject: [PATCH 37/91] migrate oneshot::complete to send in on_demand --- ethcore/light/src/on_demand/mod.rs | 42 +++++++++++++++--------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index df8a6c6a9..1428efa50 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -31,7 +31,7 @@ use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, DBValue, RwLock, Mutex, U256, H256}; +use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; @@ -52,7 +52,7 @@ impl Peer { match *pending { Pending::HeaderProof(ref req, _) => self.capabilities.serve_headers && self.status.head_num > req.num(), - Pending::HeaderByHash(ref req, _) => self.capabilities.serve_headers, + Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, Pending::Block(ref req, _) => self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), Pending::BlockReceipts(ref req, _) => @@ -156,7 +156,7 @@ impl OnDemand { }; match cached { - Some(hash) => sender.complete(hash), + Some(hash) => sender.send(hash).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), } receiver @@ -172,7 +172,7 @@ impl OnDemand { }; match cached { - Some(score) => sender.complete(score), + Some(score) => sender.send(score).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderProof(req, 
ChtProofSender::ChainScore(sender))), } @@ -193,7 +193,7 @@ impl OnDemand { }; match cached { - (Some(hash), Some(score)) => sender.complete((hash, score)), + (Some(hash), Some(score)) => sender.send((hash, score)).expect("receiver alive here; qed"), _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), } @@ -206,7 +206,7 @@ impl OnDemand { pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } receiver @@ -225,7 +225,7 @@ impl OnDemand { stream.begin_list(0); stream.begin_list(0); - sender.complete(encoded::Block::new(stream.out())) + sender.send(encoded::Block::new(stream.out())).expect("receiver alive here; qed"); } else { match self.cache.lock().block_body(&req.hash) { Some(body) => { @@ -233,7 +233,7 @@ impl OnDemand { stream.append_raw(&req.header.into_inner(), 1); stream.append_raw(&body.into_inner(), 2); - sender.complete(encoded::Block::new(stream.out())); + sender.send(encoded::Block::new(stream.out())).expect("receiver alive here; qed"); } None => self.dispatch(ctx, Pending::Block(req, sender)), } @@ -248,10 +248,10 @@ impl OnDemand { // fast path for empty receipts. if req.0.receipts_root() == SHA3_NULL_RLP { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect("receiver alive here; qed"); } else { match self.cache.lock().block_receipts(&req.0.hash()) { - Some(receipts) => sender.complete(receipts), + Some(receipts) => sender.send(receipts).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } } @@ -273,7 +273,7 @@ impl OnDemand { // fast path for no code. 
if req.code_hash == ::util::sha3::SHA3_EMPTY { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect("receiver alive here; qed") } else { self.dispatch(ctx, Pending::Code(req, sender)); } @@ -430,9 +430,9 @@ impl Handler for OnDemand { cache.insert_chain_score(hash, score); match sender { - ChtProofSender::Both(sender) => sender.complete((hash, score)), - ChtProofSender::Hash(sender) => sender.complete(hash), - ChtProofSender::ChainScore(sender) => sender.complete(score), + ChtProofSender::Both(sender) => { let _ = sender.send((hash, score)); } + ChtProofSender::Hash(sender) => { let _ = sender.send(hash); } + ChtProofSender::ChainScore(sender) => { let _ = sender.send(score); } } return } @@ -446,7 +446,7 @@ impl Handler for OnDemand { match req.check_response(header) { Ok(header) => { self.cache.lock().insert_block_header(req.0, header.clone()); - sender.complete(header); + let _ = sender.send(header); return } Err(e) => warn!("Error handling response for header request: {:?}", e), @@ -459,7 +459,7 @@ impl Handler for OnDemand { match req.check_response(&response.body) { Ok(block) => { self.cache.lock().insert_block_body(req.hash, response.body.clone()); - sender.complete(block); + let _ = sender.send(block); return } Err(e) => warn!("Error handling response for block request: {:?}", e), @@ -472,7 +472,7 @@ impl Handler for OnDemand { Ok(receipts) => { let hash = req.0.hash(); self.cache.lock().insert_block_receipts(hash, receipts.clone()); - sender.complete(receipts); + let _ = sender.send(receipts); return } Err(e) => warn!("Error handling response for receipts request: {:?}", e), @@ -485,7 +485,7 @@ impl Handler for OnDemand { Ok(maybe_account) => { // TODO: validate against request outputs. // needs engine + env info as part of request. 
- sender.complete(maybe_account); + let _ = sender.send(maybe_account); return } Err(e) => warn!("Error handling response for state request: {:?}", e), @@ -496,7 +496,7 @@ impl Handler for OnDemand { if let NetworkResponse::Code(ref response) = *response { match req.check_response(response.code.as_slice()) { Ok(()) => { - sender.complete(response.code.clone()); + let _ = sender.send(response.code.clone()); return } Err(e) => warn!("Error handling response for code request: {:?}", e), @@ -507,11 +507,11 @@ impl Handler for OnDemand { if let NetworkResponse::Execution(ref response) = *response { match req.check_response(&response.items) { ProvedExecution::Complete(executed) => { - sender.complete(Ok(executed)); + let _ = sender.send(Ok(executed)); return } ProvedExecution::Failed(err) => { - sender.complete(Err(err)); + let _ = sender.send(Err(err)); return } ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), From b5527415d6bf10c44ba14e307d812219fdd9b1f1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:33:45 +0100 Subject: [PATCH 38/91] get on_demand tests to compile --- ethcore/light/src/on_demand/mod.rs | 5 ++--- ethcore/light/src/on_demand/request.rs | 8 +++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 1428efa50..aceba66e2 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -536,7 +536,7 @@ mod tests { use cache::Cache; use net::{Announcement, BasicContext, ReqId, Error as LesError}; - use request::{Request as LesRequest, Kind as LesRequestKind}; + use request::Requests; use network::{PeerId, NodeId}; use time::Duration; @@ -546,11 +546,10 @@ mod tests { impl BasicContext for FakeContext { fn persistent_peer_id(&self, _: PeerId) -> Option { None } - fn request_from(&self, _: PeerId, _: LesRequest) -> Result { + fn request_from(&self, _: PeerId, _: Requests) -> 
Result { unimplemented!() } fn make_announcement(&self, _: Announcement) { } - fn max_requests(&self, _: PeerId, _: LesRequestKind) -> usize { 0 } fn disconnect_peer(&self, _: PeerId) { } fn disable_peer(&self, _: PeerId) { } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 4f028a71c..825ca2be2 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -294,8 +294,6 @@ mod tests { let proof = cht.prove(10_000, 0).unwrap().unwrap(); let req = HeaderProof::new(10_000, cht.root()).unwrap(); - let raw_header = test_client.block_header(::ethcore::ids::BlockId::Number(10_000)).unwrap(); - assert!(req.check_response(&proof[..]).is_ok()); } @@ -305,9 +303,9 @@ mod tests { header.set_number(10_000); header.set_extra_data(b"test_header".to_vec()); let hash = header.hash(); - let raw_header = ::rlp::encode(&header); + let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); - assert!(HeaderByHash(hash).check_response(&*raw_header).is_ok()) + assert!(HeaderByHash(hash).check_response(&raw_header).is_ok()) } #[test] @@ -323,7 +321,7 @@ mod tests { hash: header.hash(), }; - let response = encoded::Body::new(body_stream.drain()); + let response = encoded::Body::new(body_stream.drain().to_vec()); assert!(req.check_response(&response).is_ok()) } From fa42b6acecab326f773ea3ec7a4d342b02d0dd18 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 23:51:47 +0100 Subject: [PATCH 39/91] port ethsync to PIP messages --- sync/src/light_sync/mod.rs | 65 ++++++++++++++++++--------- sync/src/light_sync/response.rs | 34 +++++++------- sync/src/light_sync/sync_round.rs | 27 +++++------ sync/src/light_sync/tests/test_net.rs | 6 ++- 4 files changed, 79 insertions(+), 53 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index fba89dd7b..4590103e7 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -16,7 +16,7 @@ //! 
Light client synchronization. //! -//! This will synchronize the header chain using LES messages. +//! This will synchronize the header chain using PIP messages. //! Dataflow is largely one-directional as headers are pushed into //! the light client queue for import. Where possible, they are batched //! in groups. @@ -36,14 +36,15 @@ use std::collections::HashMap; use std::mem; use std::sync::Arc; +use ethcore::encoded; use light::client::{AsLightClient, LightChainClient}; use light::net::{ Announcement, Handler, BasicContext, EventContext, - Capabilities, ReqId, Status, + Capabilities, ReqId, Status, Error as NetError, }; -use light::request; +use light::request::{self, CompleteHeadersRequest as HeadersRequest}; use network::PeerId; -use util::{Bytes, U256, H256, Mutex, RwLock}; +use util::{U256, H256, Mutex, RwLock}; use rand::{Rng, OsRng}; use self::sync_round::{AbortReason, SyncRound, ResponseContext}; @@ -91,7 +92,7 @@ impl Peer { #[derive(Debug)] enum AncestorSearch { Queued(u64), // queued to search for blocks starting from here. - Awaiting(ReqId, u64, request::Headers), // awaiting response for this request. + Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request. Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. FoundCommon(u64, H256), // common block found. Genesis, // common ancestor is the genesis. 
@@ -113,7 +114,7 @@ impl AncestorSearch { match self { AncestorSearch::Awaiting(id, start, req) => { if &id == ctx.req_id() { - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { for header in &headers { if client.is_known(&header.hash()) { @@ -150,17 +151,17 @@ impl AncestorSearch { } fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch - where F: FnMut(request::Headers) -> Option + where F: FnMut(HeadersRequest) -> Option { - const BATCH_SIZE: usize = 64; + const BATCH_SIZE: u64 = 64; match self { AncestorSearch::Queued(start) => { - let batch_size = ::std::cmp::min(start as usize, BATCH_SIZE); + let batch_size = ::std::cmp::min(start, BATCH_SIZE); trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor", batch_size, start); - let req = request::Headers { + let req = HeadersRequest { start: start.into(), max: batch_size, skip: 0, @@ -193,13 +194,13 @@ struct ResponseCtx<'a> { peer: PeerId, req_id: ReqId, ctx: &'a BasicContext, - data: &'a [Bytes], + data: &'a [encoded::Header], } impl<'a> ResponseContext for ResponseCtx<'a> { fn responder(&self) -> PeerId { self.peer } fn req_id(&self) -> &ReqId { &self.req_id } - fn data(&self) -> &[Bytes] { self.data } + fn data(&self) -> &[encoded::Header] { self.data } fn punish_responder(&self) { self.ctx.disable_peer(self.peer) } } @@ -313,11 +314,22 @@ impl Handler for LightSync { self.maintain_sync(ctx.as_basic()); } - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - if !self.peers.read().contains_key(&ctx.peer()) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) { + let peer = ctx.peer(); + if !self.peers.read().contains_key(&peer) { return } + let headers = match responses.get(0) { + Some(&request::Response::Headers(ref response)) => &response.headers[..], + Some(_) => { + trace!("Disabling peer {} for wrong response type.", peer); + 
ctx.disable_peer(peer); + &[] + } + None => &[], + }; + { let mut state = self.state.lock(); @@ -465,18 +477,27 @@ impl LightSync { // naive request dispatcher: just give to any peer which says it will // give us responses. - let dispatcher = move |req: request::Headers| { + let dispatcher = move |req: HeadersRequest| { rng.shuffle(&mut peer_ids); + let request = { + let mut builder = request::RequestBuilder::default(); + builder.push(request::Request::Headers(request::IncompleteHeadersRequest { + start: req.start.into(), + skip: req.skip, + max: req.max, + reverse: req.reverse, + })).expect("request provided fully complete with no unresolved back-references; qed"); + builder.build() + }; for peer in &peer_ids { - if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { - match ctx.request_from(*peer, request::Request::Headers(req.clone())) { - Ok(id) => { - return Some(id) - } - Err(e) => - trace!(target: "sync", "Error requesting headers from viable peer: {}", e), + match ctx.request_from(*peer, request.clone()) { + Ok(id) => { + return Some(id) } + Err(NetError::NoCredits) => {} + Err(e) => + trace!(target: "sync", "Error requesting headers from viable peer: {}", e), } } diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index cb95824ce..d85d2548d 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -18,10 +18,11 @@ use std::fmt; +use ethcore::encoded; use ethcore::header::Header; -use light::request::{HashOrNumber, Headers as HeadersRequest}; -use rlp::{DecoderError, UntrustedRlp, View}; -use util::{Bytes, H256}; +use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest}; +use rlp::DecoderError; +use util::H256; /// Errors found when decoding headers and verifying with basic constraints. 
#[derive(Debug, PartialEq)] @@ -71,13 +72,13 @@ pub trait Constraint { fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; } -/// Decode a response and do basic verification against a request. -pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result, BasicError> { - let headers: Vec<_> = try!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); +/// Do basic verification of provided headers against a request. +pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result, BasicError> { + let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect(); let reverse = request.reverse; - try!(Max(request.max).verify(&headers, reverse)); + try!(Max(request.max as usize).verify(&headers, reverse)); match request.start { HashOrNumber::Number(ref num) => try!(StartsAtNumber(*num).verify(&headers, reverse)), HashOrNumber::Hash(ref hash) => try!(StartsAtHash(*hash).verify(&headers, reverse)), @@ -150,8 +151,9 @@ impl Constraint for Max { #[cfg(test)] mod tests { + use ethcore::encoded; use ethcore::header::Header; - use light::request::Headers as HeadersRequest; + use light::request::CompleteHeadersRequest as HeadersRequest; use super::*; @@ -175,10 +177,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -201,10 +203,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -227,10 +229,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - 
assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); + assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); } #[test] @@ -246,9 +248,9 @@ mod tests { let mut header = Header::default(); header.set_number(x); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); + assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); } } diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 6fa635214..dfa17aad4 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -20,13 +20,14 @@ use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt; +use ethcore::encoded; use ethcore::header::Header; use light::net::ReqId; -use light::request::Headers as HeadersRequest; +use light::request::CompleteHeadersRequest as HeadersRequest; use network::PeerId; -use util::{Bytes, H256}; +use util::H256; use super::response; @@ -40,7 +41,7 @@ pub trait ResponseContext { /// Get the request ID this response corresponds to. fn req_id(&self) -> &ReqId; /// Get the (unverified) response data. - fn data(&self) -> &[Bytes]; + fn data(&self) -> &[encoded::Header]; /// Punish the responder. 
fn punish_responder(&self); } @@ -114,7 +115,7 @@ impl Fetcher { let needed_headers = HeadersRequest { start: high_rung.parent_hash().clone().into(), - max: diff as usize - 1, + max: diff - 1, skip: 0, reverse: true, }; @@ -190,7 +191,7 @@ impl Fetcher { return SyncRound::Fetch(self); } - match response::decode_and_verify(headers, &request.headers_request) { + match response::verify(headers, &request.headers_request) { Err(e) => { trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); ctx.punish_responder(); @@ -286,21 +287,21 @@ impl Fetcher { } // Compute scaffold parameters from non-zero distance between start and target block: (skip, pivots). -fn scaffold_params(diff: u64) -> (u64, usize) { +fn scaffold_params(diff: u64) -> (u64, u64) { // default parameters. // amount of blocks between each scaffold pivot. const ROUND_SKIP: u64 = 255; // amount of scaffold pivots: these are the Xs in "X___X___X" - const ROUND_PIVOTS: usize = 256; + const ROUND_PIVOTS: u64 = 256; let rem = diff % (ROUND_SKIP + 1); if diff <= ROUND_SKIP { // just request headers from the start to the target. - (0, rem as usize) + (0, rem) } else { // the number of pivots necessary to exactly hit or overshoot the target. 
let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 }; - let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS as u64) as usize; + let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS); (ROUND_SKIP, num_pivots) } } @@ -319,7 +320,7 @@ pub struct RoundStart { contributors: HashSet, attempt: usize, skip: u64, - pivots: usize, + pivots: u64, } impl RoundStart { @@ -372,7 +373,7 @@ impl RoundStart { } }; - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { if self.sparse_headers.len() == 0 && headers.get(0).map_or(false, |x| x.parent_hash() != &self.start_block.1) { @@ -383,7 +384,7 @@ impl RoundStart { self.contributors.insert(ctx.responder()); self.sparse_headers.extend(headers); - if self.sparse_headers.len() == self.pivots { + if self.sparse_headers.len() as u64 == self.pivots { return if self.skip == 0 { SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into()) } else { @@ -429,7 +430,7 @@ impl RoundStart { let start = (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (self.skip + 1); - let max = self.pivots - self.sparse_headers.len(); + let max = self.pivots - self.sparse_headers.len() as u64; let headers_request = HeadersRequest { start: start.into(), diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index d0e472374..898f8766d 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -28,6 +28,7 @@ use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::net::request_credits::FlowParams; +use light::provider::LightProvider; use network::{NodeId, PeerId}; use util::RwLock; @@ -71,7 +72,7 @@ enum PeerData { } // test peer type. -// Either a full peer or a LES peer. +// Either a full peer or a light peer. 
pub struct Peer { proto: LightProtocol, queue: RwLock>, @@ -115,7 +116,8 @@ impl Peer { }, }; - let mut proto = LightProtocol::new(chain.clone(), params); + let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); + let mut proto = LightProtocol::new(Arc::new(provider), params); proto.add_handler(sync.clone()); Peer { proto: proto, From 2ee3a7282b279047697aa392901d516fd04bd23f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Mar 2017 00:14:29 +0100 Subject: [PATCH 40/91] adjust to minor on_demand API changes in RPC --- rpc/src/v1/helpers/dispatch.rs | 5 ++++- rpc/src/v1/impls/light/eth.rs | 23 +++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index b11ada048..36d1b330f 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -268,7 +268,10 @@ impl LightDispatcher { })); match nonce_future { - Some(x) => x.map(|acc| acc.nonce).map_err(|_| errors::no_light_peers()).boxed(), + Some(x) => + x.map(|acc| acc.map_or_else(Default::default, |acc| acc.nonce)) + .map_err(|_| errors::no_light_peers()) + .boxed(), None => future::err(errors::network_disabled()).boxed() } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index f889faf00..a35d48fb6 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -105,15 +105,22 @@ impl EthClient { match cht_root { None => return future::ok(None).boxed(), Some(root) => { - let req = request::HeaderByNumber::new(n, root) + let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); - self.sync.with_context(|ctx| - self.on_demand.header_by_number(ctx, req) - .map(Some) - .map_err(err_premature_cancel) - .boxed() - ) + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + self.sync.with_context(|ctx| { + let fut = 
self.on_demand.hash_by_number(ctx, req) + .map(request::HeaderByHash) + .map_err(err_premature_cancel); + + fut.and_then(move |req| { + match sync.with_context(|ctx| on_demand.header_by_hash(ctx, req)) { + Some(fut) => fut.map_err(err_premature_cancel).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).map(Some).boxed() + }) } } } @@ -149,7 +156,7 @@ impl EthClient { sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, - }).map(Some)) + })) .map(|x| x.map_err(err_premature_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) }).boxed() From c13f01c4f96bfe604df570154f0edb383e9bc3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 17 Mar 2017 09:29:43 +0100 Subject: [PATCH 41/91] Using dedicated branch for jsonrpc --- Cargo.lock | 115 ++++++++++++++++----------------------- Cargo.toml | 6 +- dapps/Cargo.toml | 6 +- ipfs/Cargo.toml | 3 +- ipfs/src/error.rs | 8 +-- ipfs/src/lib.rs | 21 ++++--- rpc/Cargo.toml | 8 +-- rpc_client/Cargo.toml | 8 +-- rpc_client/src/client.rs | 20 ++++--- signer/Cargo.toml | 6 +- stratum/Cargo.toml | 6 +- 11 files changed, 95 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8343c4dc..a705d99db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5,7 +5,7 @@ dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)", + "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -27,9 +27,9 @@ dependencies = [ "ethsync 1.7.0", 
"evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -291,7 +291,7 @@ dependencies = [ [[package]] name = "ctrlc" version = "1.1.1" -source = "git+https://github.com/ethcore/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" +source = "git+https://github.com/paritytech/rust-ctrlc.git#b523017108bb2d571a7a69bd97bc406e63bc7a9d" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -456,9 +456,9 @@ dependencies = [ "ethcore-util 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ 
-631,10 +631,10 @@ dependencies = [ "ethsync 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -677,14 +677,14 @@ dependencies = [ "ethcore-io 1.7.0", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-ui 1.7.0", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", + "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)", ] [[package]] @@ -698,9 +698,9 @@ dependencies = [ "ethcore-ipc-nano 1.7.0", "ethcore-util 
1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -980,25 +980,6 @@ dependencies = [ "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "hyper" -version = "0.10.0-a.0" -source = "git+https://github.com/ethcore/hyper#453c683b52208fefc32d29e4ac7c863439b2321f" -dependencies = [ - "cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rotor 0.6.3 (git+https://github.com/ethcore/rotor)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "hyper" version = "0.10.0-a.0" @@ 
-1119,7 +1100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1131,11 +1112,11 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1144,10 +1125,10 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1156,19 +1137,19 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1176,9 +1157,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1187,10 +1168,10 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1678,8 +1659,7 @@ dependencies = [ "cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", "ethcore-util 1.7.0", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1716,14 +1696,14 @@ dependencies = [ "ethcore-signer 1.7.0", 
"ethcore-util 1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", + "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)", ] [[package]] @@ -2629,7 +2609,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ws" version = "0.5.3" -source = "git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable#f5c0b35d660244d1b7500693c8cc28277ce1d418" +source = "git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable#f5c0b35d660244d1b7500693c8cc28277ce1d418" dependencies = [ "bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)", "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2715,7 +2695,7 @@ dependencies = [ "checksum core-foundation-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05eed248dc504a5391c63794fe4fb64f46f071280afaa1b73308f3c0ce4574c5" "checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" "checksum crypt32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e34988f7e069e0b2f3bfc064295161e489b2d4e04a2e4248fb94360cdf00b4ec" -"checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "" +"checksum ctrlc 
1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" "checksum docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4cc0acb4ce0828c6a5a11d47baa432fe885881c27428c3a4e473e454ffe57a76" @@ -2737,7 +2717,6 @@ dependencies = [ "checksum hidapi 0.3.1 (git+https://github.com/ethcore/hidapi-rs)" = "" "checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" -"checksum hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)" = "" "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" "checksum hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)" = "43a15e3273b2133aaac0150478ab443fb89f15c3de41d8d93d8f3bb14bf560f6" "checksum hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9bf64f730d6ee4b0528a5f0a316363da9d8104318731509d4ccc86248f82b3" @@ -2749,13 +2728,13 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 7.0.0 
(git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" @@ -2910,7 +2889,7 @@ dependencies = [ "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "" +"checksum ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453" "checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef" diff --git a/Cargo.toml b/Cargo.toml index 62abb2159..954f3f240 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,9 +24,9 @@ serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" fdlimit = "0.1" -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } -ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } +ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 95e1f3f56..b2c4945b5 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -25,10 +25,10 @@ unicase = "1.3" url = "1.0" zip = { version = "0.1", default-features = false } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-http-server = { git = 
"https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } # TODO [ToDr] Temporary solution, server should be merged with RPC. -jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-devtools = { path = "../devtools" } ethcore-rpc = { path = "../rpc" } diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 1443c8cf2..c6241a7aa 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -8,9 +8,8 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } rlp = { path = "../util/rlp" } mime = "0.2" -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } cid = "0.2.1" multihash = "0.5" diff --git a/ipfs/src/error.rs b/ipfs/src/error.rs index 1cbd54f1c..fadd75b9b 100644 --- a/ipfs/src/error.rs +++ b/ipfs/src/error.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use {multihash, cid, hyper}; +use {multihash, cid, http}; use route::Out; pub type Result = ::std::result::Result; @@ -25,7 +25,7 @@ pub enum ServerError { /// Wrapped `std::io::Error` IoError(::std::io::Error), /// Other `hyper` error - Other(hyper::error::Error), + Other(http::hyper::error::Error), /// Invalid --ipfs-api-interface InvalidInterface } @@ -80,8 +80,8 @@ impl From<::std::io::Error> for ServerError { } } -impl From for ServerError { - fn from(err: hyper::error::Error) -> ServerError { +impl From for ServerError { + fn from(err: http::hyper::error::Error) -> ServerError { ServerError::Other(err) } } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 3d79c00fb..df03b6cd7 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -16,14 +16,13 @@ #[macro_use] extern crate mime; -extern crate hyper; extern crate multihash; extern crate cid; extern crate rlp; extern crate ethcore; extern crate ethcore_util as util; -extern crate jsonrpc_http_server; +extern crate jsonrpc_http_server as http; pub mod error; mod route; @@ -33,13 +32,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use hyper::server::{Listening, Handler, Request, Response}; -use hyper::net::HttpStream; -use hyper::header::{self, Vary, ContentLength, ContentType}; -use hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; +use http::hyper::server::{Listening, Handler, Request, Response}; +use http::hyper::net::HttpStream; +use http::hyper::header::{self, Vary, ContentLength, ContentType}; +use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; -pub use jsonrpc_http_server::{AccessControlAllowOrigin, Host, DomainsValidation}; +pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler pub struct IpfsHandler { @@ -82,14 +81,14 @@ impl Handler for IpfsHandler { } - if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { + if 
!http::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); return Next::write(); } - let cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); - if cors_header == jsonrpc_http_server::CorsHeader::Invalid { + let cors_header = http::cors_header(&req, &self.cors_domains); + if cors_header == http::CorsHeader::Invalid { self.out = Out::Bad("Disallowed Origin header"); return Next::write(); @@ -209,7 +208,7 @@ pub fn start_server( let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); Ok( - hyper::Server::http(&addr)? + http::hyper::Server::http(&addr)? .handle(move |_| IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())) .map(|(listening, srv)| { diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index fe7afbcf6..fd8555734 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -19,10 +19,10 @@ serde_json = "0.9" time = "0.1" transient-hashmap = "0.1" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-ipc-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index a70816f9e..eb4a0ecff 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -1,7 +1,7 @@ [package] -authors = ["Ethcore 
"] +authors = ["Ethcore "] description = "Parity Rpc Client" -homepage = "http://ethcore.io" +homepage = "http://parity.io" license = "GPL-3.0" name = "parity-rpc-client" version = "1.4.0" @@ -14,8 +14,8 @@ serde = "0.9" serde_json = "0.9" tempdir = "0.3.5" url = "1.2.0" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "mio-upstream-stable" } ethcore-rpc = { path = "../rpc" } ethcore-signer = { path = "../signer" } ethcore-util = { path = "../util" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs index 5a4568d9e..3ef7ad7ce 100644 --- a/rpc_client/src/client.rs +++ b/rpc_client/src/client.rs @@ -83,18 +83,24 @@ impl Handler for RpcHandler { } fn on_error(&mut self, err: WsError) { match self.complete.take() { - Some(c) => c.complete(Err(RpcError::WsError(err))), - None => println!("unexpected error: {}", err), + Some(c) => match c.send(Err(RpcError::WsError(err))) { + Ok(_) => {}, + Err(_) => warn!(target: "rpc-client", "Unable to notify about error."), + }, + None => warn!(target: "rpc-client", "unexpected error: {}", err), } } fn on_open(&mut self, _: Handshake) -> WsResult<()> { match (self.complete.take(), self.out.take()) { (Some(c), Some(out)) => { - c.complete(Ok(Rpc { + let res = c.send(Ok(Rpc { out: out, counter: AtomicUsize::new(0), pending: self.pending.clone(), })); + if let Err(_) = res { + warn!(target: "rpc-client", "Unable to open a connection.") + } Ok(()) }, _ => { @@ -137,9 +143,9 @@ impl Handler for RpcHandler { } match self.pending.remove(response_id) { - Some(c) => c.complete(ret.map_err(|err| { - RpcError::JsonRpc(err) - })), + Some(c) => if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) { + warn!(target: "rpc-client", "Unable to 
send response.") + }, None => warn!( target: "rpc-client", "warning: unexpected id: {}", @@ -225,7 +231,7 @@ impl Rpc { // both fail and succeed. let c = once.take() .expect("connection closure called only once"); - c.complete(Err(RpcError::WsError(err))); + let _ = c.send(Err(RpcError::WsError(err))); }, // c will complete on the `on_open` event in the Handler _ => () diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 95c71b636..075aac9e8 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -12,12 +12,12 @@ rustc_version = "0.1" [dependencies] rand = "0.3.14" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } log = "0.3" env_logger = "0.3" parity-dapps-glue = { version = "1.4", optional = true } -ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } +ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "mio-upstream-stable" } ethcore-util = { path = "../util" } ethcore-io = { path = "../util/io" } ethcore-rpc = { path = "../rpc" } diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 201792340..fa418250b 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -11,9 +11,9 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = 
"parity-1.7" } +jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-util = { path = "../util" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" From 1d87f247158b9b722d6760aefad3e3caf97d47d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 20 Mar 2017 12:06:42 +0100 Subject: [PATCH 42/91] Bump --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ec811125..3e73e942f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1100,7 +1100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1112,7 +1112,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1125,7 +1125,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1137,7 +1137,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1147,7 +1147,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1157,7 +1157,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1168,7 +1168,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", From d013a13be66c0e259c069771b083b12a6832d827 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 19:45:52 +0100 Subject: [PATCH 43/91] header_chain writes to database --- ethcore/light/src/client/header_chain.rs | 236 +++++++++++++++++++---- ethcore/light/src/client/mod.rs | 9 +- 2 files changed, 207 insertions(+), 38 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 9dcd25888..676142b17 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -23,9 +23,9 @@ //! This is separate from the `BlockChain` for two reasons: //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. -// TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers` use std::collections::{BTreeMap, HashMap}; +use std::sync::Arc; use cht; @@ -34,7 +34,10 @@ use ethcore::error::BlockError; use ethcore::encoded; use ethcore::header::Header; use ethcore::ids::BlockId; -use util::{H256, U256, HeapSizeOf, Mutex, RwLock}; + +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, View}; +use util::{H256, U256, HeapSizeOf, RwLock}; +use util::kvdb::{DBTransaction, KeyValueDB}; use smallvec::SmallVec; @@ -43,6 +46,9 @@ use smallvec::SmallVec; /// relevant to any blocks we've got in memory. const HISTORY: u64 = 2048; +/// The best block key. Maps to a `u64` best block number. +const BEST_KEY: &'static [u8] = &*b"best_block_key"; + /// Information about a block. 
#[derive(Debug, Clone)] pub struct BlockDescriptor { @@ -75,39 +81,130 @@ impl HeapSizeOf for Entry { } } +impl Encodable for Entry { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.candidates.len()); + + for candidate in &self.candidates { + s.begin_list(3) + .append(&candidate.hash) + .append(&candidate.parent_hash) + .append(&candidate.total_difficulty); + } + } +} + +impl Decodable for Entry { + fn decode(decoder: &D) -> Result { + let rlp = decoder.as_rlp(); + + let mut candidates = SmallVec::<[Candidate; 3]>::new(); + + for item in rlp.iter() { + candidates.push(Candidate { + hash: item.val_at(0)?, + parent_hash: item.val_at(1)?, + total_difficulty: item.val_at(2)?, + }) + } + + if candidates.is_empty() { return Err(DecoderError::Custom("Empty candidates vector submitted.")) } + + // rely on the invariant that the canonical entry is always first. + let canon_hash = candidates[0].hash; + Ok(Entry { + candidates: candidates, + canonical_hash: canon_hash, + }) + } +} + +fn cht_key(number: u64) -> String { + format!("canonical_{}", number) +} + +fn era_key(number: u64) -> String { + format!("candidates_{}", number) +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, headers: RwLock>, best_block: RwLock, - cht_roots: Mutex>, + db: Arc, + col: Option, } impl HeaderChain { - /// Create a new header chain given this genesis block. - pub fn new(genesis: &[u8]) -> Self { + /// Create a new header chain given this genesis block and database to read from. 
+ pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { use ethcore::views::HeaderView; - let g_view = HeaderView::new(genesis); + let chain = if let Some(best_number) = db.get(col, BEST_KEY)?.map(|x| ::rlp::decode(&x)) { + let mut cur_number = best_number; + let mut candidates = BTreeMap::new(); + let mut headers = HashMap::new(); - HeaderChain { - genesis_header: encoded::Header::new(genesis.to_owned()), - best_block: RwLock::new(BlockDescriptor { - hash: g_view.hash(), - number: 0, - total_difficulty: g_view.difficulty(), - }), - candidates: RwLock::new(BTreeMap::new()), - headers: RwLock::new(HashMap::new()), - cht_roots: Mutex::new(Vec::new()), - } + // load all era entries and referenced headers within them. + while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { + let entry: Entry = ::rlp::decode(&entry); + for candidate in &entry.candidates { + match db.get(col, &*candidate.hash)? { + Some(hdr) => headers.insert(candidate.hash, encoded::Header::new(hdr.to_vec())), + None => return Err(format!("Database missing referenced header: {}", candidate.hash)), + }; + } + candidates.insert(cur_number, entry); + + cur_number -= 1; + } + + // fill best block block descriptor. 
+ if candidates.is_empty() { return Err(format!("Database corrupt: best block referenced but no data.")) } + let best_block = { + let era = candidates.get(&best_number) + .expect("candidates non-empty; filled in loop starting at best_number; qed"); + let best = &era.candidates[0]; + BlockDescriptor { + hash: best.hash, + number: best_number, + total_difficulty: best.total_difficulty, + } + }; + + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(best_block), + candidates: RwLock::new(candidates), + headers: RwLock::new(headers), + db: db, + col: col, + } + } else { + let g_view = HeaderView::new(genesis); + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(BlockDescriptor { + hash: g_view.hash(), + number: 0, + total_difficulty: g_view.difficulty(), + }), + candidates: RwLock::new(BTreeMap::new()), + headers: RwLock::new(HashMap::new()), + db: db, + col: col, + } + }; + + Ok(chain) } /// Insert a pre-verified header. /// /// This blindly trusts that the data given to it is sensible. - pub fn insert(&self, header: Header) -> Result<(), BlockError> { + pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<(), BlockError> { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); @@ -129,15 +226,19 @@ impl HeaderChain { let total_difficulty = parent_td + *header.difficulty(); // insert headers and candidates entries. 
- candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }) - .candidates.push(Candidate { + { + let cur_era = candidates.entry(number) + .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); + cur_era.candidates.push(Candidate { hash: hash, parent_hash: parent_hash, total_difficulty: total_difficulty, - }); + }); + } - let raw = ::rlp::encode(&header).to_vec(); - self.headers.write().insert(hash, encoded::Header::new(raw)); + let raw = ::rlp::encode(&header); + transaction.put(self.col, &hash[..], &*raw); + self.headers.write().insert(hash, encoded::Header::new(raw.to_vec())); // reorganize ancestors so canonical entries are first in their // respective candidates vectors. @@ -160,6 +261,10 @@ impl HeaderChain { // what about reorgs > cht::SIZE + HISTORY? // resetting to the last block of a given CHT should be possible. canon_hash = entry.candidates[0].parent_hash; + + // write altered era to disk. + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); @@ -168,13 +273,13 @@ impl HeaderChain { number: number, total_difficulty: total_difficulty, }; + transaction.put(self.col, BEST_KEY, &*::rlp::encode(&number)); // produce next CHT root if it's time. 
let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); if earliest_era + HISTORY + cht::SIZE <= number { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); - debug_assert_eq!(cht_num as usize, self.cht_roots.lock().len()); let mut headers = self.headers.write(); @@ -186,10 +291,13 @@ impl HeaderChain { let iter = || { let era_entry = candidates.remove(&i) .expect("all eras are sequential with no gaps; qed"); + transaction.delete(self.col, era_key(i).as_bytes()); + i += 1; for ancient in &era_entry.candidates { headers.remove(&ancient.hash); + transaction.delete(self.col, &ancient.hash); } let canon = &era_entry.candidates[0]; @@ -199,9 +307,9 @@ impl HeaderChain { .expect("fails only when too few items; this is checked; qed") }; + // write the CHT root to the database. debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); - - self.cht_roots.lock().push(cht_root); + transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root)); } } @@ -257,7 +365,13 @@ impl HeaderChain { /// This is because it's assumed that the genesis hash is known, /// so including it within a CHT would be redundant. pub fn cht_root(&self, n: usize) -> Option { - self.cht_roots.lock().get(n).map(|h| h.clone()) + match self.db.get(self.col, cht_key(n as u64).as_bytes()) { + Ok(val) => val.map(|x| ::rlp::decode(&x)), + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } } /// Get the genesis hash. 
@@ -297,8 +411,7 @@ impl HeaderChain { impl HeapSizeOf for HeaderChain { fn heap_size_of_children(&self) -> usize { self.candidates.read().heap_size_of_children() + - self.headers.read().heap_size_of_children() + - self.cht_roots.lock().heap_size_of_children() + self.headers.read().heap_size_of_children() } } @@ -324,16 +437,23 @@ impl<'a> Iterator for AncestryIter<'a> { #[cfg(test)] mod tests { use super::HeaderChain; + use std::sync::Arc; + use ethcore::ids::BlockId; use ethcore::header::Header; use ethcore::spec::Spec; + fn make_db() -> Arc<::util::KeyValueDB> { + Arc::new(::util::kvdb::in_memory(0)) + } + #[test] fn basic_chain() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -345,7 +465,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -361,7 +483,8 @@ mod tests { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let db = make_db(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -373,7 +496,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -389,7 +514,9 @@ mod tests { 
header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -410,7 +537,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * (i * i).into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 11; } @@ -432,11 +561,46 @@ mod tests { fn earliest_is_latest() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Latest).is_some()); assert!(chain.block_header(BlockId::Pending).is_some()); } + + #[test] + fn restore_from_db() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + } + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert!(chain.block_header(BlockId::Number(10)).is_none()); + assert!(chain.block_header(BlockId::Number(9000)).is_some()); + 
assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index c791caed1..23242f407 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -111,10 +111,14 @@ pub struct Client { impl Client { /// Create a new `Client`. pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + // TODO: use real DB. + let db = ::util::kvdb::in_memory(0); + let gh = ::rlp::encode(&spec.genesis_header()); + Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())), + chain: HeaderChain::new(Arc::new(db), None, &gh).expect("new db every time"), report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), } @@ -201,7 +205,8 @@ impl Client { for verified_header in self.queue.drain(MAX) { let (num, hash) = (verified_header.number(), verified_header.hash()); - match self.chain.insert(verified_header) { + let mut tx = unimplemented!(); + match self.chain.insert(&mut tx, verified_header) { Ok(()) => { good.push(hash); self.report.write().blocks_imported += 1; From 21771aa1a6542783833307168128b024fb994b88 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 20:23:58 +0100 Subject: [PATCH 44/91] don't keep headers in memory to avoid DoS --- ethcore/light/src/client/header_chain.rs | 153 +++++++++++++++++------ 1 file changed, 118 insertions(+), 35 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 676142b17..25c836051 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -24,7 +24,7 @@ //! - It stores only headers (and a pruned subset of them) //! 
- To allow for flexibility in the database layout once that's incorporated. -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::sync::Arc; use cht; @@ -35,7 +35,7 @@ use ethcore::encoded; use ethcore::header::Header; use ethcore::ids::BlockId; -use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, View}; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Rlp, View}; use util::{H256, U256, HeapSizeOf, RwLock}; use util::kvdb::{DBTransaction, KeyValueDB}; @@ -46,8 +46,8 @@ use smallvec::SmallVec; /// relevant to any blocks we've got in memory. const HISTORY: u64 = 2048; -/// The best block key. Maps to a `u64` best block number. -const BEST_KEY: &'static [u8] = &*b"best_block_key"; +/// The best block key. Maps to an RLP list: [best_era, last_era] +const CURRENT_KEY: &'static [u8] = &*b"best_and_latest"; /// Information about a block. #[derive(Debug, Clone)] @@ -131,7 +131,6 @@ fn era_key(number: u64) -> String { pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, - headers: RwLock>, best_block: RwLock, db: Arc, col: Option, @@ -142,30 +141,33 @@ impl HeaderChain { pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { use ethcore::views::HeaderView; - let chain = if let Some(best_number) = db.get(col, BEST_KEY)?.map(|x| ::rlp::decode(&x)) { - let mut cur_number = best_number; + let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { + let (best_number, highest_number) = { + let rlp = Rlp::new(¤t); + (rlp.val_at(0), rlp.val_at(1)) + }; + + let mut cur_number = highest_number; let mut candidates = BTreeMap::new(); - let mut headers = HashMap::new(); // load all era entries and referenced headers within them. while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { let entry: Entry = ::rlp::decode(&entry); - for candidate in &entry.candidates { - match db.get(col, &*candidate.hash)? 
{ - Some(hdr) => headers.insert(candidate.hash, encoded::Header::new(hdr.to_vec())), - None => return Err(format!("Database missing referenced header: {}", candidate.hash)), - }; - } + trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", + cur_number, entry.candidates.len()); + candidates.insert(cur_number, entry); cur_number -= 1; } // fill best block block descriptor. - if candidates.is_empty() { return Err(format!("Database corrupt: best block referenced but no data.")) } let best_block = { - let era = candidates.get(&best_number) - .expect("candidates non-empty; filled in loop starting at best_number; qed"); + let era = match candidates.get(&best_number) { + Some(era) => era, + None => return Err(format!("Database corrupt: highest block referenced but no data.")), + }; + let best = &era.candidates[0]; BlockDescriptor { hash: best.hash, @@ -178,7 +180,6 @@ impl HeaderChain { genesis_header: encoded::Header::new(genesis.to_owned()), best_block: RwLock::new(best_block), candidates: RwLock::new(candidates), - headers: RwLock::new(headers), db: db, col: col, } @@ -192,7 +193,6 @@ impl HeaderChain { total_difficulty: g_view.difficulty(), }), candidates: RwLock::new(BTreeMap::new()), - headers: RwLock::new(HashMap::new()), db: db, col: col, } @@ -225,7 +225,7 @@ impl HeaderChain { let total_difficulty = parent_td + *header.difficulty(); - // insert headers and candidates entries. + // insert headers and candidates entries and write era to disk. { let cur_era = candidates.entry(number) .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); @@ -234,15 +234,32 @@ impl HeaderChain { parent_hash: parent_hash, total_difficulty: total_difficulty, }); + + // fix ordering of era before writing. 
+ if total_difficulty > cur_era.candidates[0].total_difficulty { + let cur_pos = cur_era.candidates.len() - 1; + cur_era.candidates.swap(cur_pos, 0); + cur_era.canonical_hash = hash; + } + + transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era)) } let raw = ::rlp::encode(&header); transaction.put(self.col, &hash[..], &*raw); - self.headers.write().insert(hash, encoded::Header::new(raw.to_vec())); + + let (best_num, is_new_best) = { + let cur_best = self.best_block.read(); + if cur_best.total_difficulty < total_difficulty { + (number, true) + } else { + (cur_best.number, false) + } + }; // reorganize ancestors so canonical entries are first in their // respective candidates vectors. - if self.best_block.read().total_difficulty < total_difficulty { + if is_new_best { let mut canon_hash = hash; for (&height, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) { if height != number && entry.canonical_hash == canon_hash { break; } @@ -262,9 +279,11 @@ impl HeaderChain { // resetting to the last block of a given CHT should be possible. canon_hash = entry.candidates[0].parent_hash; - // write altered era to disk. - let rlp_era = ::rlp::encode(&*entry); - transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + // write altered era to disk + if height != number { + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + } } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); @@ -273,7 +292,6 @@ impl HeaderChain { number: number, total_difficulty: total_difficulty, }; - transaction.put(self.col, BEST_KEY, &*::rlp::encode(&number)); // produce next CHT root if it's time. 
let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); @@ -281,8 +299,6 @@ impl HeaderChain { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); - let mut headers = self.headers.write(); - let cht_root = { let mut i = earliest_era; @@ -296,7 +312,6 @@ impl HeaderChain { i += 1; for ancient in &era_entry.candidates { - headers.remove(&ancient.hash); transaction.delete(self.col, &ancient.hash); } @@ -313,20 +328,37 @@ impl HeaderChain { } } + // write the best and latest eras to the database. + { + let latest_num = *candidates.iter().rev().next().expect("at least one era just inserted; qed").0; + let mut stream = RlpStream::new_list(2); + stream.append(&best_num).append(&latest_num); + transaction.put(self.col, CURRENT_KEY, &stream.out()) + } Ok(()) } /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { + let load_from_db = |hash: H256| { + match self.db.get(self.col, &hash) { + Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } + } + }; + match id { BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), - BlockId::Hash(hash) => self.headers.read().get(&hash).cloned(), + BlockId::Hash(hash) => load_from_db(hash), BlockId::Number(num) => { if self.best_block.read().number < num { return None } self.candidates.read().get(&num).map(|entry| entry.canonical_hash) - .and_then(|hash| self.headers.read().get(&hash).cloned()) + .and_then(load_from_db) } BlockId::Latest | BlockId::Pending => { let hash = { @@ -338,7 +370,7 @@ impl HeaderChain { best.hash }; - self.headers.read().get(&hash).cloned() + load_from_db(hash) } } } @@ -401,7 +433,7 @@ impl HeaderChain { /// Get block status. 
pub fn status(&self, hash: &H256) -> BlockStatus { - match self.headers.read().contains_key(hash) { + match self.db.get(self.col, &*hash).ok().map_or(false, |x| x.is_some()) { true => BlockStatus::InChain, false => BlockStatus::Unknown, } @@ -410,8 +442,7 @@ impl HeaderChain { impl HeapSizeOf for HeaderChain { fn heap_size_of_children(&self) -> usize { - self.candidates.read().heap_size_of_children() + - self.headers.read().heap_size_of_children() + self.candidates.read().heap_size_of_children() } } @@ -603,4 +634,56 @@ mod tests { assert!(chain.cht_root(3).is_none()); assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); } + + #[test] + fn restore_higher_non_canonical() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + + // push 100 low-difficulty blocks. + for i in 1..101 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + + // push fewer high-difficulty blocks. 
+ for i in 1..11 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into() * 1000.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + } + + // after restoration, non-canonical eras should still be loaded. + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + assert!(chain.candidates.read().get(&100).is_some()) + } } From bc9c1d482417bb47a83a56699b71df0a41f68518 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 20:57:13 +0100 Subject: [PATCH 45/91] use a database in ethcore-light --- Cargo.lock | 1 + ethcore/light/Cargo.toml | 1 + ethcore/light/src/client/mod.rs | 39 ++++++++++++++++----- ethcore/light/src/client/service.rs | 54 ++++++++++++++++++++++++++--- ethcore/light/src/lib.rs | 3 ++ ethcore/src/client/config.rs | 2 +- ethcore/src/db.rs | 4 ++- 7 files changed, 88 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6cf05cb0..05d57cbdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -550,6 +550,7 @@ name = "ethcore-light" version = "1.7.0" dependencies = [ "ethcore 1.7.0", + "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 6f95d8a0e..78210904e 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -17,6 +17,7 @@ ethcore-util = { path = "../../util" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } ethcore-ipc = { path = "../../ipc/rpc", optional = true } +ethcore-devtools = { path = 
"../../devtools" } rlp = { path = "../../util/rlp" } time = "0.1" smallvec = "0.3.1" diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 23242f407..a393130b1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::client::{ClientReport, EnvInfo}; +use ethcore::client::{ClientReport, EnvInfo, DatabaseCompactionProfile}; use ethcore::engines::Engine; use ethcore::ids::BlockId; use ethcore::header::Header; @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock}; +use util::{H256, Mutex, RwLock, KeyValueDB}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -45,6 +45,14 @@ mod service; pub struct Config { /// Verification queue config. pub queue: queue::Config, + /// Chain column in database. + pub chain_column: Option, + /// Database cache size. `None` => rocksdb default. + pub db_cache_size: Option, + /// State db compaction profile + pub db_compaction: DatabaseCompactionProfile, + /// Should db have WAL enabled? + pub db_wal: bool, } /// Trait for interacting with the header chain abstractly. @@ -106,22 +114,30 @@ pub struct Client { chain: HeaderChain, report: RwLock, import_lock: Mutex<()>, + db: Arc, } impl Client { /// Create a new `Client`. - pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { - // TODO: use real DB. 
- let db = ::util::kvdb::in_memory(0); + pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel) -> Result { let gh = ::rlp::encode(&spec.genesis_header()); - Client { + Ok(Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(Arc::new(db), None, &gh).expect("new db every time"), + chain: HeaderChain::new(db.clone(), chain_col, &gh)?, report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), - } + db: db, + }) + } + + /// Create a new `Client` backed purely in-memory. + /// This will ignore all database options in the configuration. + pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + let db = ::util::kvdb::in_memory(0); + + Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed") } /// Import a header to the queue for additional verification. @@ -205,7 +221,7 @@ impl Client { for verified_header in self.queue.drain(MAX) { let (num, hash) = (verified_header.number(), verified_header.hash()); - let mut tx = unimplemented!(); + let mut tx = self.db.transaction(); match self.chain.insert(&mut tx, verified_header) { Ok(()) => { good.push(hash); @@ -216,6 +232,11 @@ impl Client { bad.push(hash); } } + self.db.write_buffered(tx); + + if let Err(e) = self.db.flush() { + panic!("Database flush failed: {}. Check disk health and space.", e); + } } self.queue.mark_as_bad(&bad); diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index fe7caee94..f7a4d41a9 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -17,14 +17,36 @@ //! Minimal IO service for light client. //! Just handles block import messages and passes them to the client. 
+use std::fmt; +use std::path::Path; use std::sync::Arc; +use ethcore::db; use ethcore::service::ClientIoMessage; use ethcore::spec::Spec; use io::{IoContext, IoError, IoHandler, IoService}; +use util::kvdb::{Database, DatabaseConfig}; use super::{Client, Config as ClientConfig}; +/// Errors on service initialization. +#[derive(Debug)] +pub enum Error { + /// Database error. + Database(String), + /// I/O service error. + Io(IoError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Database(ref msg) => write!(f, "Database error: {}", msg), + Error::Io(ref err) => write!(f, "I/O service error: {}", err), + } + } +} + /// Light client service. pub struct Service { client: Arc, @@ -33,11 +55,31 @@ pub struct Service { impl Service { /// Start the service: initialize I/O workers and client itself. - pub fn start(config: ClientConfig, spec: &Spec) -> Result { - let io_service = try!(IoService::::start()); - let client = Arc::new(Client::new(config, spec, io_service.channel())); - try!(io_service.register_handler(Arc::new(ImportBlocks(client.clone())))); + pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result { + // initialize database. + let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); + // give all rocksdb cache to the header chain column. 
+ if let Some(size) = config.db_cache_size { + db_config.set_cache(db::COL_LIGHT_CHAIN, size); + } + + db_config.compaction = config.db_compaction.compaction_profile(path); + db_config.wal = config.db_wal; + + let db = Arc::new(Database::open( + &db_config, + &path.to_str().expect("DB path could not be converted to string.") + ).map_err(Error::Database)?); + + let io_service = IoService::::start().map_err(Error::Io)?; + let client = Arc::new(Client::new(config, + db, + db::COL_LIGHT_CHAIN, + spec, + io_service.channel(), + ).map_err(Error::Database)?); + io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { client: client, _io_service: io_service, @@ -63,11 +105,13 @@ impl IoHandler for ImportBlocks { #[cfg(test)] mod tests { use super::Service; + use devtools::RandomTempPath; use ethcore::spec::Spec; #[test] fn it_works() { let spec = Spec::new_test(); - Service::start(Default::default(), &spec).unwrap(); + let temp_path = RandomTempPath::new(); + Service::start(Default::default(), &spec, temp_path.as_path()).unwrap(); } } diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ada58d8de..828d77043 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -76,3 +76,6 @@ extern crate stats; #[cfg(feature = "ipc")] extern crate ethcore_ipc as ipc; + +#[cfg(test)] +extern crate ethcore_devtools as devtools; diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 5c7cf9471..b58ae83cb 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -26,7 +26,7 @@ use verification::{VerifierType, QueueConfig}; use util::{journaldb, CompactionProfile}; /// Client state db compaction profile -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum DatabaseCompactionProfile { /// Try to determine compaction profile automatically Auto, diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index 4e8da714d..bccb8e943 100644 --- a/ethcore/src/db.rs 
+++ b/ethcore/src/db.rs @@ -38,8 +38,10 @@ pub const COL_TRACE: Option = Some(4); pub const COL_ACCOUNT_BLOOM: Option = Some(5); /// Column for general information from the local node which can persist. pub const COL_NODE_INFO: Option = Some(6); +/// Column for the light client chain. +pub const COL_LIGHT_CHAIN: Option = Some(7); /// Number of columns in DB -pub const NUM_COLUMNS: Option = Some(7); +pub const NUM_COLUMNS: Option = Some(8); /// Modes for updating caches. #[derive(Clone, Copy)] From dd1f8295c4c1ab5fe155e9435dd5607728057056 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 21:00:31 +0100 Subject: [PATCH 46/91] fix sync test compilation --- sync/src/light_sync/tests/test_net.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 898f8766d..2319e8d35 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -207,7 +207,7 @@ impl TestNet { pub fn light(n_light: usize, n_full: usize) -> Self { let mut peers = Vec::with_capacity(n_light + n_full); for _ in 0..n_light { - let client = LightClient::new(Default::default(), &Spec::new_test(), IoChannel::disconnected()); + let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected()); peers.push(Arc::new(Peer::new_light(Arc::new(client)))) } From dd1a3fc60ac1c67b89004a64ab1076c0e4f07f7f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 15:58:43 +0100 Subject: [PATCH 47/91] migration to light client mode --- ethcore/src/migrations/mod.rs | 19 +++++++++++++++++-- ethcore/src/migrations/v11.rs | 26 -------------------------- 2 files changed, 17 insertions(+), 28 deletions(-) delete mode 100644 ethcore/src/migrations/v11.rs diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 6cc4a13a8..76b10fd19 100644 --- a/ethcore/src/migrations/mod.rs +++ 
b/ethcore/src/migrations/mod.rs @@ -16,6 +16,8 @@ //! Database migrations. +use util::migration::ChangeColumns; + pub mod state; pub mod blocks; pub mod extras; @@ -27,5 +29,18 @@ pub use self::v9::Extract; mod v10; pub use self::v10::ToV10; -mod v11; -pub use self::v11::TO_V11; +/// The migration from v10 to v11. +/// Adds a column for node info. +pub const TO_V11: ChangeColumns = ChangeColumns { + pre_columns: Some(6), + post_columns: Some(7), + version: 11, +}; + +/// The migration from v11 to v12. +/// Adds a column for light chain storage. +pub const TO_V12: ChangeColumns = ChangeColumns { + pre_columns: Some(7), + post_columns: Some(8), + version: 12, +}; diff --git a/ethcore/src/migrations/v11.rs b/ethcore/src/migrations/v11.rs deleted file mode 100644 index e33de6170..000000000 --- a/ethcore/src/migrations/v11.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Adds a seventh column for node information. - -use util::migration::ChangeColumns; - -/// The migration from v10 to v11. 
-pub const TO_V11: ChangeColumns = ChangeColumns { - pre_columns: Some(6), - post_columns: Some(7), - version: 11, -}; From a9d75e222311ed1b505b282c50785e0f5fe5603c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 16:45:50 +0100 Subject: [PATCH 48/91] CLI options for light client --- parity/cli/config.full.toml | 2 +- parity/cli/mod.rs | 9 +++++++++ parity/cli/usage.txt | 5 +++++ parity/configuration.rs | 4 ++++ parity/run.rs | 3 +++ rpc/src/v1/tests/helpers/sync_provider.rs | 4 ++-- rpc/src/v1/types/mod.rs | 2 +- rpc/src/v1/types/sync.rs | 18 +++++++++--------- sync/src/api.rs | 18 +++++++++--------- 9 files changed, 43 insertions(+), 22 deletions(-) diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 6800ec2dc..4ddf8eaa7 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -38,7 +38,7 @@ warp = true allow_ips = "all" snapshot_peers = 0 max_pending_peers = 64 -serve_light = true +no_serve_light = false reserved_only = false reserved_peers = "./path_to_file" diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b346cb9d8..6fb15dbe6 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -93,6 +93,7 @@ usage! { flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), + flag_light: bool = false, or |c: &Config| otry!(c.parity).light, // -- Account Options flag_unlock: Option = None, @@ -148,6 +149,8 @@ usage! 
{ flag_reserved_only: bool = false, or |c: &Config| otry!(c.network).reserved_only.clone(), flag_no_ancient_blocks: bool = false, or |_| None, + flag_no_serve_light: bool = false, + or |c: &Config| otry!(c.network).no_serve_light.clone(), // -- API and Console Options // RPC @@ -372,6 +375,7 @@ struct Operating { db_path: Option, keys_path: Option, identity: Option, + light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -407,6 +411,7 @@ struct Network { node_key: Option, reserved_peers: Option, reserved_only: Option, + no_serve_light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -630,6 +635,7 @@ mod tests { flag_db_path: Some("$HOME/.parity/chains".into()), flag_keys_path: "$HOME/.parity/keys".into(), flag_identity: "".into(), + flag_light: false, // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), @@ -660,6 +666,7 @@ mod tests { flag_reserved_peers: Some("./path_to_file".into()), flag_reserved_only: false, flag_no_ancient_blocks: false, + flag_no_serve_light: false, // -- API and Console Options // RPC @@ -832,6 +839,7 @@ mod tests { db_path: None, keys_path: None, identity: None, + light: None, }), account: Some(Account { unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), @@ -861,6 +869,7 @@ mod tests { node_key: None, reserved_peers: Some("./path/to/reserved_peers".into()), reserved_only: Some(true), + no_serve_light: None, }), rpc: Some(Rpc { disable: Some(true), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 322543607..55f64b018 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -67,6 +67,10 @@ Operating Options: --keys-path PATH Specify the path for JSON key files to be found (default: {flag_keys_path}). --identity NAME Specify your node's name. (default: {flag_identity}) + --light Experimental: run in light client mode. Light clients + synchronize a bare minimum of data and fetch necessary + data on-demand from the network. 
Much lower in storage, + potentially higher in bandwidth. (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. @@ -126,6 +130,7 @@ Networking Options: --max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers}) --no-ancient-blocks Disable downloading old blocks after snapshot restoration or warp sync. (default: {flag_no_ancient_blocks}) + --no-serve-light Disable serving of light peers. (default: {flag_no_serve_light}) API and Console Options: --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) diff --git a/parity/configuration.rs b/parity/configuration.rs index 9b491143d..1418f372c 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -375,6 +375,8 @@ impl Configuration { check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, verifier_settings: verifier_settings, + serve_light: !self.args.flag_no_serve_light, + light: self.args.flag_light, }; Cmd::Run(run_cmd) }; @@ -1194,6 +1196,8 @@ mod tests { check_seal: true, download_old_blocks: true, verifier_settings: Default::default(), + serve_light: true, + light: false, }; expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); diff --git a/parity/run.rs b/parity/run.rs index 7fe1ad273..a00a5c03c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -107,6 +107,8 @@ pub struct RunCmd { pub check_seal: bool, pub download_old_blocks: bool, pub verifier_settings: VerifierSettings, + pub serve_light: bool, + pub light: bool, } pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { @@ -248,6 +250,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R sync_config.fork_block = spec.fork_block(); sync_config.warp_sync = cmd.warp_sync; sync_config.download_old_blocks = cmd.download_old_blocks; + 
sync_config.serve_light = cmd.serve_light; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index fe2ae3f59..83c7db015 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -83,7 +83,7 @@ impl SyncProvider for TestSyncProvider { difficulty: Some(40.into()), head: 50.into(), }), - les_info: None, + pip_info: None, }, PeerInfo { id: None, @@ -96,7 +96,7 @@ impl SyncProvider for TestSyncProvider { difficulty: None, head: 60.into() }), - les_info: None, + pip_info: None, } ] } diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index a4bfcb41f..78eee5137 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -63,7 +63,7 @@ pub use self::receipt::Receipt; pub use self::rpc_settings::RpcSettings; pub use self::sync::{ SyncStatus, SyncInfo, Peers, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, - TransactionStats, ChainStatus, EthProtocolInfo, LesProtocolInfo, + TransactionStats, ChainStatus, EthProtocolInfo, PipProtocolInfo, }; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; diff --git a/rpc/src/v1/types/sync.rs b/rpc/src/v1/types/sync.rs index d83a3a64c..813fe8cb3 100644 --- a/rpc/src/v1/types/sync.rs +++ b/rpc/src/v1/types/sync.rs @@ -83,8 +83,8 @@ pub struct PeerNetworkInfo { pub struct PeerProtocolsInfo { /// Ethereum protocol information pub eth: Option, - /// LES protocol information. - pub les: Option, + /// PIP protocol information. 
+ pub pip: Option, } /// Peer Ethereum protocol information @@ -108,10 +108,10 @@ impl From for EthProtocolInfo { } } -/// Peer LES protocol information +/// Peer PIP protocol information #[derive(Default, Debug, Serialize)] -pub struct LesProtocolInfo { - /// Negotiated LES protocol version +pub struct PipProtocolInfo { + /// Negotiated PIP protocol version pub version: u32, /// Peer total difficulty pub difficulty: U256, @@ -119,9 +119,9 @@ pub struct LesProtocolInfo { pub head: String, } -impl From for LesProtocolInfo { - fn from(info: ethsync::LesProtocolInfo) -> Self { - LesProtocolInfo { +impl From for PipProtocolInfo { + fn from(info: ethsync::PipProtocolInfo) -> Self { + PipProtocolInfo { version: info.version, difficulty: info.difficulty.into(), head: info.head.hex(), @@ -171,7 +171,7 @@ impl From for PeerInfo { }, protocols: PeerProtocolsInfo { eth: p.eth_info.map(Into::into), - les: p.les_info.map(Into::into), + pip: p.pip_info.map(Into::into), }, } } diff --git a/sync/src/api.rs b/sync/src/api.rs index 4cdc9d37a..6feba062d 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -126,7 +126,7 @@ pub struct PeerInfo { /// Eth protocol info. pub eth_info: Option, /// Light protocol info. - pub les_info: Option, + pub pip_info: Option, } /// Ethereum protocol info. @@ -141,10 +141,10 @@ pub struct EthProtocolInfo { pub difficulty: Option, } -/// LES protocol info. +/// PIP protocol info. 
#[derive(Debug)] #[cfg_attr(feature = "ipc", derive(Binary))] -pub struct LesProtocolInfo { +pub struct PipProtocolInfo { /// Protocol version pub version: u32, /// SHA3 of peer best block hash @@ -153,9 +153,9 @@ pub struct LesProtocolInfo { pub difficulty: U256, } -impl From for LesProtocolInfo { +impl From for PipProtocolInfo { fn from(status: light_net::Status) -> Self { - LesProtocolInfo { + PipProtocolInfo { version: status.protocol_version, head: status.head_hash, difficulty: status.head_td, @@ -184,7 +184,7 @@ pub struct EthSync { network: NetworkService, /// Main (eth/par) protocol handler eth_handler: Arc, - /// Light (les) protocol handler + /// Light (pip) protocol handler light_proto: Option>, /// The main subprotocol name subprotocol_name: [u8; 3], @@ -264,7 +264,7 @@ impl SyncProvider for EthSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: eth_sync.peer_info(&peer_id), - les_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), + pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) @@ -408,7 +408,7 @@ impl ChainNotify for EthSync { } } -/// LES event handler. +/// PIP event handler. /// Simply queues transactions from light client peers. 
struct TxRelay(Arc); @@ -786,7 +786,7 @@ impl LightSyncProvider for LightSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: None, - les_info: self.proto.peer_status(&peer_id).map(Into::into), + pip_info: self.proto.peer_status(&peer_id).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) From c718b5618ec14491299368a0e3806708a98708cc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 18:32:04 +0100 Subject: [PATCH 49/91] initial light CLI --- ethcore/light/src/client/mod.rs | 7 ++- ethcore/light/src/client/service.rs | 2 +- parity/cli/usage.txt | 3 +- parity/migration.rs | 3 +- parity/run.rs | 94 ++++++++++++++++++++++++++++- sync/src/api.rs | 2 +- 6 files changed, 101 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index a393130b1..92866da6d 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::client::{ClientReport, EnvInfo, DatabaseCompactionProfile}; +use ethcore::client::{ClientReport, EnvInfo}; use ethcore::engines::Engine; use ethcore::ids::BlockId; use ethcore::header::Header; @@ -31,7 +31,8 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock, KeyValueDB}; +use util::{H256, Mutex, RwLock}; +use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -50,7 +51,7 @@ pub struct Config { /// Database cache size. `None` => rocksdb default. pub db_cache_size: Option, /// State db compaction profile - pub db_compaction: DatabaseCompactionProfile, + pub db_compaction: CompactionProfile, /// Should db have WAL enabled? 
pub db_wal: bool, } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index f7a4d41a9..89538fec2 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -64,7 +64,7 @@ impl Service { db_config.set_cache(db::COL_LIGHT_CHAIN, size); } - db_config.compaction = config.db_compaction.compaction_profile(path); + db_config.compaction = config.db_compaction; db_config.wal = config.db_wal; let db = Arc::new(Database::open( diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 55f64b018..11d5c6e54 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -70,7 +70,8 @@ Operating Options: --light Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, - potentially higher in bandwidth. (default: {flag_light}) + potentially higher in bandwidth. Has no effect with + subcommands (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. diff --git a/parity/migration.rs b/parity/migration.rs index 445724325..c4e5f5ac6 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -30,7 +30,7 @@ use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 11; +const CURRENT_VERSION: u32 = 12; /// First version of the consolidated database. const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. 
@@ -147,6 +147,7 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?; manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?; + manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?; Ok(manager) } diff --git a/parity/run.rs b/parity/run.rs index a00a5c03c..92aba9ed7 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -154,6 +154,89 @@ impl ::local_store::NodeInfo for FullNodeInfo { } } +// helper for light execution. +fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) -> Result<(bool, Option), String> { + use light::client as light_client; + use ethsync::{LightSyncParams, LightSync, ManageNetwork}; + use util::RwLock; + + let panic_handler = PanicHandler::new_in_arc(); + + // load spec + let spec = cmd.spec.spec()?; + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + + let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()); + + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?; + + // create dirs used by parity + cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?; + + info!("Starting {}", Colour::White.bold().paint(version())); + info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); + + // start client and create transaction queue. 
+ let mut config = light_client::Config { + queue: Default::default(), + chain_column: ::ethcore::db::COL_LIGHT_CHAIN, + db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024), + db_compaction: compaction, + db_wal: cmd.wal, + }; + + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.verifier_settings = cmd.verifier_settings; + + let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) + .map_err(|e| format!("Error starting light client: {}", e))?; + let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); + let provider = ::light::provider::LightProvider::new(service.client().clone(), txq); + + // start network. + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // set network path. + net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + let sync_params = LightSyncParams { + network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?, + client: Arc::new(provider), + network_id: cmd.network_id.unwrap_or(spec.network_id()), + subprotocol_name: ::ethsync::LIGHT_PROTOCOL, + }; + let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + light_sync.start_network(); + + // start RPCs. + let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; + + // prepare account provider + let _account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); + // rest TODO + + // wait for ctrl-c. 
+ Ok(wait_for_exit(panic_handler, None, None, can_restart)) +} + pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running @@ -163,12 +246,17 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } } - // set up panic handler - let panic_handler = PanicHandler::new_in_arc(); - // increase max number of open files raise_fd_limit(); + // run as light client. + if cmd.light { + return execute_light(cmd, can_restart, logger); + } + + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); + // load spec let spec = cmd.spec.spec()?; diff --git a/sync/src/api.rs b/sync/src/api.rs index 6feba062d..010bbeb23 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -43,7 +43,7 @@ pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par"; /// Ethereum sync protocol pub const ETH_PROTOCOL: ProtocolId = *b"eth"; /// Ethereum light protocol -pub const LIGHT_PROTOCOL: ProtocolId = *b"plp"; +pub const LIGHT_PROTOCOL: ProtocolId = *b"pip"; /// Sync configuration #[derive(Debug, Clone, Copy)] From e3d6525d8313eae2c471cd457f6c4bfebef2ed2b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 19:26:51 +0100 Subject: [PATCH 50/91] store cumulative cost in pending request set. --- ethcore/light/src/net/mod.rs | 21 +++++++------ ethcore/light/src/net/request_set.rs | 45 +++++++++++++++++++++++----- ethcore/light/src/net/tests/mod.rs | 4 +-- sync/src/light_sync/mod.rs | 6 ++-- 4 files changed, 56 insertions(+), 20 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index de86f1ce5..5f78f9c22 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -303,12 +303,9 @@ impl LightProtocol { match peer.remote_flow { None => Err(Error::NotServer), Some((ref mut creds, ref params)) => { - // check that enough credits are available. 
- let mut temp_creds: Credits = creds.clone(); - for request in requests.requests() { - temp_creds.deduct_cost(params.compute_cost(request))?; - } - *creds = temp_creds; + // compute and deduct cost. + let cost = params.compute_cost_multi(requests.requests()); + creds.deduct_cost(cost)?; let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { @@ -318,7 +315,7 @@ impl LightProtocol { }); // begin timeout. - peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now()); Ok(req_id) } } @@ -408,13 +405,18 @@ impl LightProtocol { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); + let cumulative_cost = peer_info.pending_requests.cumulative_cost(); let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; - let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); - c.update_to(actual_credits); + + // only update if the cumulative cost of the request set is zero. + if cumulative_cost == 0.into() { + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); + } Ok(()) } @@ -520,6 +522,7 @@ impl LightProtocol { last_update: SteadyTime::now(), }); + trace!(target: "pip", "Sending status to peer {}", peer); io.send(*peer, packet::STATUS, status_packet); } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index a2391ef6f..094fa1894 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -27,22 +27,29 @@ use std::iter::FromIterator; use request::Request; use request::Requests; use net::{timeout, ReqId}; +use util::U256; use time::{Duration, SteadyTime}; +// Request set entry: requests + cost. 
+#[derive(Debug)] +struct Entry(Requests, U256); + /// Request set. #[derive(Debug)] pub struct RequestSet { counter: u64, + cumulative_cost: U256, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { fn default() -> Self { RequestSet { counter: 0, + cumulative_cost: 0.into(), base: None, ids: HashMap::new(), reqs: BTreeMap::new(), @@ -52,10 +59,12 @@ impl Default for RequestSet { impl RequestSet { /// Push requests onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { + pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) { let counter = self.counter; + self.cumulative_cost = self.cumulative_cost + cost; + self.ids.insert(req_id, counter); - self.reqs.insert(counter, req); + self.reqs.insert(counter, Entry(req, cost)); if self.reqs.keys().next().map_or(true, |x| *x == counter) { self.base = Some(now); @@ -71,7 +80,7 @@ impl RequestSet { None => return None, }; - let req = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); + let Entry(req, cost) = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); match self.reqs.keys().next() { Some(k) if *k > id => self.base = Some(now), @@ -79,6 +88,7 @@ impl RequestSet { _ => {} } + self.cumulative_cost = self.cumulative_cost - cost; Some(req) } @@ -93,7 +103,7 @@ impl RequestSet { let first_req = self.reqs.values().next() .expect("base existing implies `reqs` non-empty; qed"); - base + compute_timeout(&first_req) <= now + base + compute_timeout(&first_req.0) <= now } /// Collect all pending request ids. @@ -108,6 +118,9 @@ impl RequestSet { /// Whether the set is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// The cumulative cost of all requests in the set. + pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost } } // helper to calculate timeout for a specific set of requests. 
@@ -141,8 +154,8 @@ mod tests { let the_req = RequestBuilder::default().build(); let req_time = compute_timeout(&the_req); - req_set.insert(ReqId(0), the_req.clone(), test_begin); - req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); + req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); + req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); @@ -153,4 +166,22 @@ mod tests { assert!(!req_set.check_timeout(test_end)); assert!(req_set.check_timeout(test_end + Duration::seconds(1))); } + + #[test] + fn cumulative_cost() { + let the_req = RequestBuilder::default().build(); + let test_begin = SteadyTime::now(); + let test_end = test_begin + Duration::seconds(1); + let mut req_set = RequestSet::default(); + + for i in 0..5 { + req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin); + assert_eq!(req_set.cumulative_cost, (i + 1).into()); + } + + for i in (0..5).rev() { + assert!(req_set.remove(&ReqId(i), test_end).is_some()); + assert_eq!(req_set.cumulative_cost, i.into()); + } + } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index e2081534c..67dfe8131 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -600,8 +600,8 @@ fn id_guard() { let mut pending_requests = RequestSet::default(); - pending_requests.insert(req_id_1, req.clone(), ::time::SteadyTime::now()); - pending_requests.insert(req_id_2, req, ::time::SteadyTime::now()); + pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now()); + pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now()); proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { local_credits: flow_params.create_credits(), diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 4590103e7..1b092ab03 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -418,8 +418,10 @@ 
impl LightSync { let best_td = chain_info.pending_total_difficulty; let sync_target = match *self.best_seen.lock() { Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), - _ => { - trace!(target: "sync", "No target to sync to."); + ref other => { + let network_score = other.as_ref().map(|target| target.head_td); + trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}", + network_score, best_td); *state = SyncState::Idle; return; } From 35d9a9815eb9798af58150ec7f986d890a1f8fcb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 20:14:40 +0100 Subject: [PATCH 51/91] mild abstraction of RPC dependencies --- parity/dapps.rs | 4 +- parity/rpc.rs | 22 ++-- parity/rpc_apis.rs | 264 ++++++++++++++++++++++++--------------------- parity/run.rs | 5 +- parity/signer.rs | 24 +++-- 5 files changed, 177 insertions(+), 142 deletions(-) diff --git a/parity/dapps.rs b/parity/dapps.rs index bbd5f4960..29268e904 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -59,7 +59,7 @@ impl Default for Configuration { } pub struct Dependencies { - pub apis: Arc, + pub apis: Arc, pub client: Arc, pub sync: Arc, pub remote: parity_reactor::TokioRemote, @@ -182,7 +182,7 @@ mod server { } else { rpc_apis::ApiSet::UnsafeContext }; - let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); + let apis = rpc_apis::setup_rpc(deps.stats, &*deps.apis, api_set); let start_result = match auth { None => { server.start_unsecured_http(url, apis, deps.remote) diff --git a/parity/rpc.rs b/parity/rpc.rs index a435f24db..e4307f09b 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -82,8 +82,8 @@ impl fmt::Display for IpcConfiguration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct Dependencies { + pub apis: Arc, pub remote: TokioRemote, pub stats: Arc, } @@ -109,7 +109,9 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, 
String> { +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> + where D: rpc_apis::Dependencies +{ if !conf.enabled { return Ok(None); } @@ -119,12 +121,14 @@ pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result MetaIoHandler { - rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> + where D: rpc_apis::Dependencies +{ + rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis) } -pub fn setup_http_rpc_server( - dependencies: &Dependencies, +pub fn setup_http_rpc_server( + dependencies: &Dependencies, url: &SocketAddr, cors_domains: Option>, allowed_hosts: Option>, @@ -145,12 +149,12 @@ pub fn setup_http_rpc_server( } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); } Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { let handler = setup_apis(apis, dependencies); let remote = dependencies.remote.clone(); match rpc::start_ipc(addr, handler, remote, RpcExtractor) { diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 469245c19..a27fbfa26 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -27,7 +27,7 @@ use ethcore::client::Client; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use ethcore_rpc::{Metadata, NetworkSettings}; -use ethcore_rpc::informant::{Middleware, RpcStats, ClientNotifier}; +use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; use ethcore_rpc::dispatch::FullDispatcher; use ethsync::{ManageNetwork, SyncProvider}; use hash_fetch::fetch::Client as FetchClient; @@ -112,25 +112,6 @@ impl 
FromStr for ApiSet { } } -pub struct Dependencies { - pub signer_service: Arc, - pub client: Arc, - pub snapshot: Arc, - pub sync: Arc, - pub net: Arc, - pub secret_store: Arc, - pub miner: Arc, - pub external_miner: Arc, - pub logger: Arc, - pub settings: Arc, - pub net_service: Arc, - pub updater: Arc, - pub geth_compatibility: bool, - pub dapps_interface: Option, - pub dapps_port: Option, - pub fetch: FetchClient, -} - fn to_modules(apis: &[Api]) -> BTreeMap { let mut modules = BTreeMap::new(); for api in apis { @@ -151,6 +132,145 @@ fn to_modules(apis: &[Api]) -> BTreeMap { modules } +/// RPC dependencies can be used to initialize RPC endpoints from APIs. +pub trait Dependencies { + type Notifier: ActivityNotifier; + + /// Create the activity notifier. + fn activity_notifier(&self) -> Self::Notifier; + + /// Extend the given I/O handler with endpoints for each API. + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]); +} + +/// RPC dependencies for a full node. +pub struct FullDependencies { + pub signer_service: Arc, + pub client: Arc, + pub snapshot: Arc, + pub sync: Arc, + pub net: Arc, + pub secret_store: Arc, + pub miner: Arc, + pub external_miner: Arc, + pub logger: Arc, + pub settings: Arc, + pub net_service: Arc, + pub updater: Arc, + pub geth_compatibility: bool, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, +} + +impl Dependencies for FullDependencies { + type Notifier = ClientNotifier; + + fn activity_notifier(&self) -> ClientNotifier { + ClientNotifier { + client: self.client.clone(), + } + } + + fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &[Api]) { + use ethcore_rpc::v1::*; + + macro_rules! 
add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) + } + } + } + } + + let dispatcher = FullDispatcher::new(Arc::downgrade(&self.client), Arc::downgrade(&self.miner)); + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(NetClient::new(&self.sync).to_delegate()); + }, + Api::Eth => { + let client = EthClient::new( + &self.client, + &self.snapshot, + &self.sync, + &self.secret_store, + &self.miner, + &self.external_miner, + EthClientOptions { + pending_nonce_from_queue: self.geth_compatibility, + allow_pending_receipt_query: !self.geth_compatibility, + send_block_number_in_get_work: !self.geth_compatibility, + } + ); + handler.extend_with(client.to_delegate()); + + let filter_client = EthFilterClient::new(&self.client, &self.miner); + handler.extend_with(filter_client.to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + }, + Api::Personal => { + handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(ParityClient::new( + &self.client, + &self.miner, + &self.sync, + &self.updater, + &self.net_service, + &self.secret_store, + self.logger.clone(), + 
self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(ParitySetClient::new( + &self.client, + &self.miner, + &self.updater, + &self.net_service, + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(TracesClient::new(&self.client, &self.miner).to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] @@ -172,110 +292,12 @@ impl ApiSet { } } -macro_rules! add_signing_methods { - ($namespace:ident, $handler:expr, $deps:expr) => { - { - let handler = &mut $handler; - let deps = &$deps; - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); - if deps.signer_service.is_enabled() { - handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) - } else { - handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) - } - } - } -} - -pub fn setup_rpc(stats: Arc, deps: Arc, apis: ApiSet) -> MetaIoHandler { - use ethcore_rpc::v1::*; - - let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, ClientNotifier { - client: deps.client.clone(), - })); - +pub fn setup_rpc(stats: Arc, deps: &D, apis: ApiSet) -> MetaIoHandler> { + let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, deps.activity_notifier())); // it's turned into vector, cause ont of the cases requires &[] let apis = 
apis.list_apis().into_iter().collect::>(); - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + deps.extend_with_set(&mut handler, &apis[..]); - for api in &apis { - match *api { - Api::Web3 => { - handler.extend_with(Web3Client::new().to_delegate()); - }, - Api::Net => { - handler.extend_with(NetClient::new(&deps.sync).to_delegate()); - }, - Api::Eth => { - let client = EthClient::new( - &deps.client, - &deps.snapshot, - &deps.sync, - &deps.secret_store, - &deps.miner, - &deps.external_miner, - EthClientOptions { - pending_nonce_from_queue: deps.geth_compatibility, - allow_pending_receipt_query: !deps.geth_compatibility, - send_block_number_in_get_work: !deps.geth_compatibility, - } - ); - handler.extend_with(client.to_delegate()); - - let filter_client = EthFilterClient::new(&deps.client, &deps.miner); - handler.extend_with(filter_client.to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - }, - Api::Personal => { - handler.extend_with(PersonalClient::new(&deps.secret_store, dispatcher.clone(), deps.geth_compatibility).to_delegate()); - }, - Api::Signer => { - handler.extend_with(SignerClient::new(&deps.secret_store, dispatcher.clone(), &deps.signer_service).to_delegate()); - }, - Api::Parity => { - let signer = match deps.signer_service.is_enabled() { - true => Some(deps.signer_service.clone()), - false => None, - }; - handler.extend_with(ParityClient::new( - &deps.client, - &deps.miner, - &deps.sync, - &deps.updater, - &deps.net_service, - &deps.secret_store, - deps.logger.clone(), - deps.settings.clone(), - signer, - deps.dapps_interface.clone(), - deps.dapps_port, - ).to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - add_signing_methods!(ParitySigning, handler, deps); - }, - Api::ParityAccounts => { - handler.extend_with(ParityAccountsClient::new(&deps.secret_store).to_delegate()); - }, - Api::ParitySet => { - handler.extend_with(ParitySetClient::new( - &deps.client, - 
&deps.miner, - &deps.updater, - &deps.net_service, - deps.fetch.clone(), - ).to_delegate()) - }, - Api::Traces => { - handler.extend_with(TracesClient::new(&deps.client, &deps.miner).to_delegate()) - }, - Api::Rpc => { - let modules = to_modules(&apis); - handler.extend_with(RpcClient::new(modules).to_delegate()); - } - } - } handler } diff --git a/parity/run.rs b/parity/run.rs index 92aba9ed7..be4e25f97 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -498,7 +498,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // set up dependencies for rpc servers let rpc_stats = Arc::new(informant::RpcStats::default()); let signer_path = cmd.signer_conf.signer_path.clone(); - let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { + let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { signer_service: Arc::new(rpc_apis::SignerService::new(move || { signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) }, cmd.ui_address)), @@ -553,7 +553,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R remote: event_loop.raw_remote(), rpc_stats: rpc_stats.clone(), }; - let signer_server = signer::start(cmd.signer_conf.clone(), signer_deps)?; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; // secret store key server let secretstore_deps = secretstore::Dependencies { }; diff --git a/parity/signer.rs b/parity/signer.rs index 0d71604d4..b4c70494d 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -23,7 +23,7 @@ pub use ethcore_signer::Server as SignerServer; use ansi_term::Colour; use dir::default_data_path; use ethcore_rpc::informant::RpcStats; -use ethcore_rpc; +use ethcore_rpc::{self, ConfirmationsQueue}; use ethcore_signer as signer; use helpers::replace_home; use parity_reactor::TokioRemote; @@ -55,8 +55,8 @@ impl Default for Configuration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct 
Dependencies { + pub apis: Arc, pub remote: TokioRemote, pub rpc_stats: Arc, } @@ -77,11 +77,15 @@ impl signer::MetaExtractor for StandardExtractor { } } -pub fn start(conf: Configuration, deps: Dependencies) -> Result, String> { +pub fn start( + conf: Configuration, + queue: Arc, + deps: Dependencies, +) -> Result, String> { if !conf.enabled { Ok(None) } else { - Ok(Some(do_start(conf, deps)?)) + Ok(Some(do_start(conf, queue, deps)?)) } } @@ -125,14 +129,18 @@ pub fn generate_new_token(path: String) -> io::Result { Ok(code) } -fn do_start(conf: Configuration, deps: Dependencies) -> Result { +fn do_start( + conf: Configuration, + queue: Arc, + deps: Dependencies +) -> Result { let addr = format!("{}:{}", conf.interface, conf.port) .parse() .map_err(|_| format!("Invalid port specified: {}", conf.port))?; let start_result = { let server = signer::ServerBuilder::new( - deps.apis.signer_service.queue(), + queue, codes_path(conf.signer_path), ); if conf.skip_origin_validation { @@ -141,7 +149,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result Date: Wed, 22 Mar 2017 21:09:43 +0100 Subject: [PATCH 52/91] light client RPC dependencies --- ethcore/light/src/lib.rs | 1 + parity/rpc_apis.rs | 115 ++++++++++++++++++++++++++++++++- rpc/src/v1/helpers/dispatch.rs | 1 - rpc/src/v1/impls/light/mod.rs | 3 + rpc/src/v1/impls/light/net.rs | 49 ++++++++++++++ rpc/src/v1/impls/net.rs | 2 +- sync/src/api.rs | 9 +++ 7 files changed, 175 insertions(+), 5 deletions(-) create mode 100644 rpc/src/v1/impls/light/net.rs diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 828d77043..82b6ea126 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -55,6 +55,7 @@ pub mod remote { mod types; +pub use self::cache::Cache; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; pub use types::request as request; diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index a27fbfa26..3f8f38a3a 100644 --- 
a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -28,12 +28,13 @@ use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use ethcore_rpc::{Metadata, NetworkSettings}; use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; -use ethcore_rpc::dispatch::FullDispatcher; -use ethsync::{ManageNetwork, SyncProvider}; +use ethcore_rpc::dispatch::{FullDispatcher, LightDispatcher}; +use ethsync::{ManageNetwork, SyncProvider, LightSync}; use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{MetaIoHandler}; +use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; use updater::Updater; -use util::RotatingLogger; +use util::{Mutex, RwLock, RotatingLogger}; #[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { @@ -271,6 +272,114 @@ impl Dependencies for FullDependencies { } } +/// Light client notifier. Doesn't do anything yet, but might in the future. +pub struct LightClientNotifier; + +impl ActivityNotifier for LightClientNotifier { + fn active(&self) {} +} + +/// RPC dependencies for a light client. 
+pub struct LightDependencies { + pub signer_service: Arc, + pub client: Arc<::light::client::Client>, + pub sync: Arc, + pub net: Arc, + pub secret_store: Arc, + pub logger: Arc, + pub settings: Arc, + pub on_demand: Arc<::light::on_demand::OnDemand>, + pub cache: Arc>, + pub transaction_queue: Arc>, + pub updater: Arc, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, + pub geth_compatibility: bool, +} + +impl Dependencies for LightDependencies { + type Notifier = LightClientNotifier; + + fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + use ethcore_rpc::v1::*; + + let dispatcher = LightDispatcher::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + self.transaction_queue.clone(), + ); + + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate()); + }, + Api::Eth => { + let client = light::EthClient::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.transaction_queue.clone(), + self.secret_store.clone(), + self.cache.clone(), + ); + handler.extend_with(client.to_delegate()); + + // TODO: filters and signing methods. 
+ }, + Api::Personal => { + handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(light::ParityClient::new( + Arc::new(dispatcher.clone()), + self.secret_store.clone(), + self.logger.clone(), + self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + // TODO + //add_signing_methods!(EthSigning, handler, self); + //add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(light::ParitySetClient::new( + self.sync.clone(), + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(light::TracesClient.to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 8a99a7239..e1b298b9f 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -207,7 +207,6 @@ pub fn fetch_gas_price_corpus( } /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. -/// Light client `ETH` RPC. #[derive(Clone)] pub struct LightDispatcher { /// Sync service. 
diff --git a/rpc/src/v1/impls/light/mod.rs b/rpc/src/v1/impls/light/mod.rs index 8c2e6d240..38ba2438e 100644 --- a/rpc/src/v1/impls/light/mod.rs +++ b/rpc/src/v1/impls/light/mod.rs @@ -23,7 +23,10 @@ pub mod eth; pub mod parity; pub mod parity_set; pub mod trace; +pub mod net; pub use self::eth::EthClient; pub use self::parity::ParityClient; pub use self::parity_set::ParitySetClient; +pub use self::net::NetClient; +pub use self::trace::TracesClient; diff --git a/rpc/src/v1/impls/light/net.rs b/rpc/src/v1/impls/light/net.rs new file mode 100644 index 000000000..4f0ede48f --- /dev/null +++ b/rpc/src/v1/impls/light/net.rs @@ -0,0 +1,49 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Net rpc implementation. +use std::sync::Arc; +use jsonrpc_core::Error; +use ethsync::LightSyncProvider; +use v1::traits::Net; + +/// Net rpc implementation. +pub struct NetClient { + sync: Arc +} + +impl NetClient where S: LightSyncProvider { + /// Creates new NetClient. 
+ pub fn new(sync: Arc) -> Self { + NetClient { + sync: sync, + } + } +} + +impl Net for NetClient where S: LightSyncProvider { + fn version(&self) -> Result { + Ok(format!("{}", self.sync.network_id()).to_owned()) + } + + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned()) + } + + fn is_listening(&self) -> Result { + Ok(true) + } +} diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 5588805ab..399b2201a 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -21,7 +21,7 @@ use ethsync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient where S: SyncProvider { +pub struct NetClient { sync: Weak } diff --git a/sync/src/api.rs b/sync/src/api.rs index 010bbeb23..e6c093893 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -642,6 +642,9 @@ pub trait LightSyncProvider { /// Get peers information fn peers(&self) -> Vec; + /// Get network id. + fn network_id(&self) -> u64; + /// Get the enode if available. 
fn enode(&self) -> Option; @@ -666,6 +669,7 @@ pub struct LightSync { proto: Arc, network: NetworkService, subprotocol_name: [u8; 3], + network_id: u64, } impl LightSync { @@ -701,6 +705,7 @@ impl LightSync { proto: light_proto, network: service, subprotocol_name: params.subprotocol_name, + network_id: params.network_id, }) } @@ -796,6 +801,10 @@ impl LightSyncProvider for LightSync { self.network.external_url() } + fn network_id(&self) -> u64 { + self.network_id + } + fn transactions_stats(&self) -> BTreeMap { Default::default() // TODO } From 83911a7290d7d9090946f616e66082e5fd7c3ead Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 22:00:52 +0100 Subject: [PATCH 53/91] complete quick'n'dirty light CLI --- parity/dapps.rs | 100 +++++++++++++++++++++++---------------------- parity/rpc_apis.rs | 1 - parity/run.rs | 95 ++++++++++++++++++++++++++++++++++++------ sync/src/api.rs | 6 +++ 4 files changed, 140 insertions(+), 62 deletions(-) diff --git a/parity/dapps.rs b/parity/dapps.rs index 29268e904..970f51b96 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -18,13 +18,15 @@ use std::path::PathBuf; use std::sync::Arc; use dir::default_data_path; -use ethcore::client::Client; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::transaction::{Transaction, Action}; use ethcore_rpc::informant::RpcStats; -use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; +use hash_fetch::urlhint::ContractClient; use helpers::replace_home; use rpc_apis::{self, SignerService}; use parity_reactor; +use util::{Bytes, Address, U256}; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { @@ -58,17 +60,56 @@ impl Default for Configuration { } } -pub struct Dependencies { - pub apis: Arc, +/// Registrar implementation of the full client. +pub struct FullRegistrar { + /// Handle to the full client. 
pub client: Arc, - pub sync: Arc, +} + +impl ContractClient for FullRegistrar { + fn registrar(&self) -> Result { + self.client.additional_params().get("registrar") + .ok_or_else(|| "Registrar not defined.".into()) + .and_then(|registrar| { + registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) + }) + } + + fn call(&self, address: Address, data: Bytes) -> Result { + let from = Address::default(); + let transaction = Transaction { + nonce: self.client.latest_nonce(&from), + action: Action::Call(address), + gas: U256::from(50_000_000), + gas_price: U256::default(), + value: U256::default(), + data: data, + }.fake_sign(from); + + self.client.call(&transaction, BlockId::Latest, Default::default()) + .map_err(|e| format!("{:?}", e)) + .map(|executed| { + executed.output + }) + } +} + +// TODO: light client implementation forwarding to OnDemand and waiting for future +// to resolve. + +pub struct Dependencies { + pub apis: Arc, + pub sync_status: Arc<::ethcore_dapps::SyncStatus>, + pub contract_client: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, pub stats: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> + where D: rpc_apis::Dependencies +{ if !configuration.enabled { return Ok(None); } @@ -130,21 +171,16 @@ mod server { use std::sync::Arc; use std::net::SocketAddr; use std::io; - use util::{Bytes, Address, U256}; use ansi_term::Colour; - use ethcore::transaction::{Transaction, Action}; - use ethcore::client::{Client, BlockChainClient, BlockId}; use ethcore_dapps::{AccessControlAllowOrigin, Host}; - use ethcore_rpc::is_major_importing; - use hash_fetch::urlhint::ContractClient; use parity_reactor; use rpc_apis; pub use ethcore_dapps::Server as WebappServer; - pub fn setup_dapps_server( - deps: Dependencies, + pub fn setup_dapps_server( + deps: Dependencies, dapps_path: PathBuf, 
extra_dapps: Vec, url: &SocketAddr, @@ -157,18 +193,16 @@ mod server { let server = dapps::ServerBuilder::new( &dapps_path, - Arc::new(Registrar { client: deps.client.clone() }), + deps.contract_client, parity_reactor::Remote::new(deps.remote.clone()), ); let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - let sync = deps.sync.clone(); - let client = deps.client.clone(); let signer = deps.signer.clone(); let server = server .fetch(deps.fetch.clone()) - .sync_status(Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()))) + .sync_status(deps.sync_status) .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) .extra_dapps(&extra_dapps) .signer_address(deps.signer.address()) @@ -201,36 +235,4 @@ mod server { Ok(server) => Ok(server), } } - - struct Registrar { - client: Arc, - } - - impl ContractClient for Registrar { - fn registrar(&self) -> Result { - self.client.additional_params().get("registrar") - .ok_or_else(|| "Registrar not defined.".into()) - .and_then(|registrar| { - registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) - }) - } - - fn call(&self, address: Address, data: Bytes) -> Result { - let from = Address::default(); - let transaction = Transaction { - nonce: self.client.latest_nonce(&from), - action: Action::Call(address), - gas: U256::from(50_000_000), - gas_price: U256::default(), - value: U256::default(), - data: data, - }.fake_sign(from); - - self.client.call(&transaction, BlockId::Latest, Default::default()) - .map_err(|e| format!("{:?}", e)) - .map(|executed| { - executed.output - }) - } - } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 3f8f38a3a..6f5a0c5f8 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -291,7 +291,6 @@ pub struct LightDependencies { pub on_demand: 
Arc<::light::on_demand::OnDemand>, pub cache: Arc>, pub transaction_queue: Arc>, - pub updater: Arc, pub dapps_interface: Option, pub dapps_port: Option, pub fetch: FetchClient, diff --git a/parity/run.rs b/parity/run.rs index be4e25f97..74cae07fd 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -30,6 +30,7 @@ use ethcore::account_provider::{AccountProvider, AccountProviderSettings}; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; use ethcore::snapshot; use ethcore::verification::queue::VerifierSettings; +use light::Cache as LightDataCache; use ethsync::SyncConfig; use informant::Informant; use updater::{UpdatePolicy, Updater}; @@ -61,6 +62,10 @@ const SNAPSHOT_PERIOD: u64 = 10000; // how many blocks to wait before starting a periodic snapshot. const SNAPSHOT_HISTORY: u64 = 100; +// Number of minutes before a given gas price corpus should expire. +// Light client only. +const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6; + // Pops along with error messages when a password is missing or invalid. const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file."; @@ -155,7 +160,7 @@ impl ::local_store::NodeInfo for FullNodeInfo { } // helper for light execution. 
-fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) -> Result<(bool, Option), String> { +fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { use light::client as light_client; use ethsync::{LightSyncParams, LightSync, ManageNetwork}; use util::RwLock; @@ -206,7 +211,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) .map_err(|e| format!("Error starting light client: {}", e))?; let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); - let provider = ::light::provider::LightProvider::new(service.client().clone(), txq); + let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone()); // start network. // set up bootnodes @@ -215,6 +220,13 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - net_conf.boot_nodes = spec.nodes.clone(); } + // TODO: configurable cache size. + let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); + let cache = Arc::new(::util::Mutex::new(cache)); + + // start on_demand service. + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + // set network path. net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); let sync_params = LightSyncParams { @@ -222,16 +234,70 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - client: Arc::new(provider), network_id: cmd.network_id.unwrap_or(spec.network_id()), subprotocol_name: ::ethsync::LIGHT_PROTOCOL, + handlers: vec![on_demand.clone()], }; let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + let light_sync = Arc::new(light_sync); light_sync.start_network(); // start RPCs. 
+ // spin up event loop + let event_loop = EventLoop::spawn(); + + // fetch service + let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; // prepare account provider - let _account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); - // rest TODO + let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); + let rpc_stats = Arc::new(informant::RpcStats::default()); + let signer_path = cmd.signer_conf.signer_path.clone(); + + let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { + signer_service: Arc::new(rpc_apis::SignerService::new(move || { + signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) + }, cmd.ui_address)), + client: service.client().clone(), + sync: light_sync.clone(), + net: light_sync.clone(), + secret_store: account_provider, + logger: logger, + settings: Arc::new(cmd.net_settings), + on_demand: on_demand, + cache: cache, + transaction_queue: txq, + dapps_interface: match cmd.dapps_conf.enabled { + true => Some(cmd.dapps_conf.interface.clone()), + false => None, + }, + dapps_port: match cmd.dapps_conf.enabled { + true => Some(cmd.dapps_conf.port), + false => None, + }, + fetch: fetch, + geth_compatibility: cmd.geth_compatibility, + }); + + let dependencies = rpc::Dependencies { + apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + stats: rpc_stats.clone(), + }; + + // start rpc servers + let _http_server = rpc::new_http(cmd.http_conf, &dependencies)?; + let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; + + // the signer server + let signer_deps = signer::Dependencies { + apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + rpc_stats: rpc_stats.clone(), + }; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let 
_signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; + + // TODO: Dapps // wait for ctrl-c. Ok(wait_for_exit(panic_handler, None, None, can_restart)) @@ -536,14 +602,19 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; // the dapps server - let dapps_deps = dapps::Dependencies { - apis: deps_for_rpc_apis.clone(), - client: client.clone(), - sync: sync_provider.clone(), - remote: event_loop.raw_remote(), - fetch: fetch.clone(), - signer: deps_for_rpc_apis.signer_service.clone(), - stats: rpc_stats.clone(), + let dapps_deps = { + let (sync, client) = (sync_provider.clone(), client.clone()); + let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); + + dapps::Dependencies { + apis: deps_for_rpc_apis.clone(), + sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())), + contract_client: contract_client, + remote: event_loop.raw_remote(), + fetch: fetch.clone(), + signer: deps_for_rpc_apis.signer_service.clone(), + stats: rpc_stats.clone(), + } }; let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; diff --git a/sync/src/api.rs b/sync/src/api.rs index e6c093893..927d8fce6 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -662,6 +662,8 @@ pub struct LightSyncParams { pub network_id: u64, /// Subprotocol name. pub subprotocol_name: [u8; 3], + /// Other handlers to attach. + pub handlers: Vec>, } /// Service for light synchronization. 
@@ -696,6 +698,10 @@ impl LightSync { let sync_handler = try!(SyncHandler::new(params.client.clone())); light_proto.add_handler(Arc::new(sync_handler)); + for handler in params.handlers { + light_proto.add_handler(handler); + } + Arc::new(light_proto) }; From 23a6b1998512960383cf377c08df4203eeb4ab1d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 22:10:02 +0100 Subject: [PATCH 54/91] fix import --- parity/rpc_apis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index ad61dc8ca..76e54ff81 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -34,7 +34,7 @@ use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{MetaIoHandler}; use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; use updater::Updater; -use util::{Mutex, RwLock, RotatingLogger}; +use util::{Mutex, RwLock}; use ethcore_logger::RotatingLogger; #[derive(Debug, PartialEq, Clone, Eq, Hash)] From a55001ad1de1a291dfa3800a76bc242828450c45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 02:55:25 +0100 Subject: [PATCH 55/91] fix deadlock in on_demand --- ethcore/light/src/on_demand/mod.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 8d451c88e..6e37a74ae 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -210,7 +210,7 @@ impl OnDemand { /// it as easily. 
pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); - match self.cache.lock().block_header(&req.0) { + match { self.cache.lock().block_header(&req.0) } { Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } @@ -232,7 +232,7 @@ impl OnDemand { sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_body(&req.hash) { + match { self.cache.lock().block_body(&req.hash) } { Some(body) => { let mut stream = RlpStream::new_list(3); stream.append_raw(&req.header.into_inner(), 1); @@ -255,7 +255,7 @@ impl OnDemand { if req.0.receipts_root() == SHA3_NULL_RLP { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_receipts(&req.0.hash()) { + match { self.cache.lock().block_receipts(&req.0.hash()) } { Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } @@ -397,10 +397,12 @@ impl Handler for OnDemand { } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - let mut peers = self.peers.write(); - if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { - peer.status.update_from(&announcement); - peer.capabilities.update_from(&announcement); + { + let mut peers = self.peers.write(); + if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { + peer.status.update_from(&announcement); + peer.capabilities.update_from(&announcement); + } } self.dispatch_orphaned(ctx.as_basic()); From b96eb458770fd6e3a9b34115255ef2e36c4f8bbc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 03:23:53 +0100 Subject: [PATCH 56/91] eth_syncing RPC for light client --- rpc/src/v1/impls/light/eth.rs | 17 +++++++++++++++- sync/src/api.rs | 16 +++++++++++---- sync/src/light_sync/mod.rs | 37 +++++++++++++++++++++++++++++++++++ 3 files 
changed, 65 insertions(+), 5 deletions(-) diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 251daf90d..ba32bf35c 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -245,7 +245,22 @@ impl Eth for EthClient { } fn syncing(&self) -> Result { - rpc_unimplemented!() + if self.sync.is_major_importing() { + let chain_info = self.client.chain_info(); + let current_block = U256::from(chain_info.best_block_number); + let highest_block = self.sync.highest_block().map(U256::from) + .unwrap_or_else(|| current_block.clone()); + + Ok(SyncStatus::Info(SyncInfo { + starting_block: U256::from(self.sync.start_block()).into(), + current_block: current_block.into(), + highest_block: highest_block.into(), + warp_chunks_amount: None, + warp_chunks_processed: None, + })) + } else { + Ok(SyncStatus::None) + } } fn author(&self, _meta: Self::Metadata) -> BoxFuture { diff --git a/sync/src/api.rs b/sync/src/api.rs index 927d8fce6..bfa33e7b8 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -669,6 +669,7 @@ pub struct LightSyncParams { /// Service for light synchronization. pub struct LightSync { proto: Arc, + sync: Arc<::light_sync::SyncInfo + Sync + Send>, network: NetworkService, subprotocol_name: [u8; 3], network_id: u64, @@ -682,7 +683,7 @@ impl LightSync { use light_sync::LightSync as SyncHandler; // initialize light protocol handler and attach sync module. - let light_proto = { + let (sync, light_proto) = { let light_params = LightParams { network_id: params.network_id, flow_params: Default::default(), // or `None`? 
@@ -695,20 +696,21 @@ impl LightSync { }; let mut light_proto = LightProtocol::new(params.client.clone(), light_params); - let sync_handler = try!(SyncHandler::new(params.client.clone())); - light_proto.add_handler(Arc::new(sync_handler)); + let sync_handler = Arc::new(try!(SyncHandler::new(params.client.clone()))); + light_proto.add_handler(sync_handler.clone()); for handler in params.handlers { light_proto.add_handler(handler); } - Arc::new(light_proto) + (sync_handler, Arc::new(light_proto)) }; let service = try!(NetworkService::new(params.network_config)); Ok(LightSync { proto: light_proto, + sync: sync, network: service, subprotocol_name: params.subprotocol_name, network_id: params.network_id, @@ -726,6 +728,12 @@ impl LightSync { } } +impl ::std::ops::Deref for LightSync { + type Target = ::light_sync::SyncInfo; + + fn deref(&self) -> &Self::Target { &*self.sync } +} + impl ManageNetwork for LightSync { fn accept_unreserved_peers(&self) { self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 1b092ab03..18fe3c953 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -206,6 +206,7 @@ impl<'a> ResponseContext for ResponseCtx<'a> { /// Light client synchronization manager. See module docs for more details. pub struct LightSync { + start_block_number: u64, best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, @@ -525,6 +526,7 @@ impl LightSync { /// so it can act on events. pub fn new(client: Arc) -> Result { Ok(LightSync { + start_block_number: client.as_light_client().chain_info().best_block_number, best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), client: client, @@ -533,3 +535,38 @@ impl LightSync { }) } } + +/// Trait for erasing the type of a light sync object and exposing read-only methods. 
+pub trait SyncInfo { + /// Get the highest block advertised on the network. + fn highest_block(&self) -> Option; + + /// Get the block number at the time of sync start. + fn start_block(&self) -> u64; + + /// Whether major sync is underway. + fn is_major_importing(&self) -> bool; +} + +impl SyncInfo for LightSync { + fn highest_block(&self) -> Option { + self.best_seen.lock().as_ref().map(|x| x.head_num) + } + + fn start_block(&self) -> u64 { + self.start_block_number + } + + fn is_major_importing(&self) -> bool { + const EMPTY_QUEUE: usize = 3; + + if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE { + return true; + } + + match *self.state.lock() { + SyncState::Idle => false, + _ => true, + } + } +} From 0d110ed47c72c4c6b51f7219cc88efa0dfa48ec8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:00:22 +0100 Subject: [PATCH 57/91] apply pending changes to chain after DB commit --- ethcore/light/src/client/header_chain.rs | 29 ++++++++++++++++++++---- ethcore/light/src/client/mod.rs | 10 ++++---- 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 022bae04e..8256595c3 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -126,6 +126,11 @@ fn era_key(number: u64) -> String { format!("candidates_{}", number) } +/// Pending changes from `insert` to be applied after the database write has finished. +pub struct PendingChanges { + best_block: Option, // new best block. +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. @@ -203,10 +208,15 @@ impl HeaderChain { /// Insert a pre-verified header. /// /// This blindly trusts that the data given to it is sensible. 
- pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<(), BlockError> { + /// Returns a set of pending changes to be applied with `apply_pending` + /// before the next call to insert and after the transaction has been written. + pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); + let mut pending = PendingChanges { + best_block: None, + }; // hold candidates the whole time to guard import order. let mut candidates = self.candidates.write(); @@ -286,11 +296,11 @@ impl HeaderChain { } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); - *self.best_block.write() = BlockDescriptor { + pending.best_block = Some(BlockDescriptor { hash: hash, number: number, total_difficulty: total_difficulty, - }; + }); // produce next CHT root if it's time. let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); @@ -334,7 +344,15 @@ impl HeaderChain { stream.append(&best_num).append(&latest_num); transaction.put(self.col, CURRENT_KEY, &stream.out()) } - Ok(()) + Ok(pending) + } + + /// Apply pending changes from a previous `insert` operation. + /// Must be done before the next `insert` call. + pub fn apply_pending(&self, pending: PendingChanges) { + if let Some(best_block) = pending.best_block { + *self.best_block.write() = best_block; + } } /// Get a block header. In the case of query by number, only canonical blocks @@ -360,6 +378,9 @@ impl HeaderChain { .and_then(load_from_db) } BlockId::Latest | BlockId::Pending => { + // hold candidates here to prevent deletion of the header + // as we read it. 
+ let _candidates = self.candidates.read(); let hash = { let best = self.best_block.read(); if best.number == 0 { diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 92866da6d..d294053e1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -223,18 +223,20 @@ impl Client { let (num, hash) = (verified_header.number(), verified_header.hash()); let mut tx = self.db.transaction(); - match self.chain.insert(&mut tx, verified_header) { - Ok(()) => { + let pending = match self.chain.insert(&mut tx, verified_header) { + Ok(pending) => { good.push(hash); self.report.write().blocks_imported += 1; + pending } Err(e) => { debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); bad.push(hash); + break; } - } + }; self.db.write_buffered(tx); - + self.chain.apply_pending(pending); if let Err(e) = self.db.flush() { panic!("Database flush failed: {}. Check disk health and space.", e); } From 4eb69dc0fe09ff2252dd8754bfbb2a865021640d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:36:49 +0100 Subject: [PATCH 58/91] reintroduce credits recharging --- ethcore/light/src/net/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 624bd1041..395d42d0d 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -303,10 +303,18 @@ impl LightProtocol { match peer.remote_flow { None => Err(Error::NotServer), Some((ref mut creds, ref params)) => { + // apply recharge to credits if there's no pending requests. + if peer.pending_requests.is_empty() { + params.recharge(creds); + } + // compute and deduct cost. let cost = params.compute_cost_multi(requests.requests()); creds.deduct_cost(cost)?; + trace!(target: "pip", "requesting from peer {}. 
Cost: {}; Available: {}", + peer_id, cost, creds.current()); + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { let mut stream = RlpStream::new_list(2); @@ -686,6 +694,8 @@ impl LightProtocol { trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); // deserialize requests, check costs and request validity. + self.flow_params.recharge(&mut peer.local_credits); + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; @@ -712,6 +722,7 @@ impl LightProtocol { }); trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current()); io.respond(packet::RESPONSE, { let mut stream = RlpStream::new_list(3); From 77f036ee21b573407fd3ca3b4544941f9cc66232 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:38:08 +0100 Subject: [PATCH 59/91] fix capabilities-interpreting error in on_demand --- ethcore/light/src/on_demand/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 6e37a74ae..20875c1a1 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -57,15 +57,15 @@ impl Peer { self.capabilities.serve_headers && self.status.head_num > req.num(), Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, Pending::Block(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::BlockReceipts(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + 
self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()), Pending::Account(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::Code(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1), Pending::TxProof(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), } } } From ec52a4a2357292452d7a393788a11b4ba9895bed Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 13:24:04 +0100 Subject: [PATCH 60/91] more tracing in on-demand --- ethcore/light/src/on_demand/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 20875c1a1..f658a1f2c 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -319,7 +319,6 @@ impl OnDemand { } } - trace!(target: "on_demand", "No suitable peer for request"); self.orphaned_requests.write().push(pending); } @@ -353,6 +352,7 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); + trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len()); for mut orphaned in to_dispatch { let hung_up = match orphaned { Pending::HeaderProof(_, ref mut sender) => match *sender { From a1df49ef3ee623c02c50f5be60e40e559bb56170 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:04:26 +0100 Subject: [PATCH 61/91] add test for request vec deserialization --- ethcore/light/src/types/request/mod.rs | 27 ++++++++++++++++++++++++++ 1 file changed, 27 
insertions(+) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 83d7963ac..c7bc8776d 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1707,4 +1707,31 @@ mod tests { assert_eq!(rlp.val_at::(0).unwrap(), 100usize); assert_eq!(rlp.list_at::(1).unwrap(), reqs); } + + #[test] + fn responses_vec() { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + + let body = ::ethcore::encoded::Body::new(stream.out()); + let reqs = vec![ + Response::Headers(HeadersResponse { headers: vec![] }), + Response::HeaderProof(HeaderProofResponse { proof: vec![], hash: Default::default(), td: 100.into()}), + Response::Receipts(ReceiptsResponse { receipts: vec![Default::default()] }), + Response::Body(BodyResponse { body: body }), + Response::Account(AccountResponse { + proof: vec![], + nonce: 100.into(), + balance: 123.into(), + code_hash: Default::default(), + storage_root: Default::default() + }), + Response::Storage(StorageResponse { proof: vec![], value: H256::default() }), + Response::Code(CodeResponse { code: vec![1, 2, 3, 4, 5] }), + Response::Execution(ExecutionResponse { items: vec![] }), + ]; + + let raw = ::rlp::encode_list(&reqs); + assert_eq!(::rlp::decode_list::(&raw), reqs); + } } From ac7f1f6719621e9390f1f97bc64a6aaa43cf97d0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:15:13 +0100 Subject: [PATCH 62/91] fix header chain tests --- ethcore/light/src/client/header_chain.rs | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 8256595c3..1c218204b 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -517,8 +517,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = 
chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -548,8 +549,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -566,8 +568,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -589,8 +592,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 11; } @@ -640,8 +644,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -676,8 +681,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -692,8 +698,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } From 54eb575000c870948239179bbdc23c7e6cd9c902 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:38:23 +0100 Subject: [PATCH 63/91] request tests that demonstrate broken RLP behavior --- 
ethcore/light/src/types/request/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index c7bc8776d..f640687d5 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1601,7 +1601,7 @@ mod tests { let full_req = Request::Account(req.clone()); let res = AccountResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], nonce: 100.into(), balance: 123456.into(), code_hash: Default::default(), @@ -1625,7 +1625,7 @@ mod tests { let full_req = Request::Storage(req.clone()); let res = StorageResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], value: H256::default(), }; let full_res = Response::Storage(res.clone()); From 1485dd07aed9b83ee5eafcc61bea1968da557c7c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:38:32 +0100 Subject: [PATCH 64/91] use prev credits in tracing --- ethcore/light/src/net/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 395d42d0d..56b078b3b 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -309,11 +309,12 @@ impl LightProtocol { } // compute and deduct cost. + let pre_creds = creds.current(); let cost = params.compute_cost_multi(requests.requests()); creds.deduct_cost(cost)?; trace!(target: "pip", "requesting from peer {}. 
Cost: {}; Available: {}", - peer_id, cost, creds.current()); + peer_id, cost, pre_creds); let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { From c75b49667eb15982db6b2da083f562a6055f5896 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:49:02 +0100 Subject: [PATCH 65/91] workaround for #5008 --- ethcore/light/src/types/request/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index f640687d5..0e11b8a7f 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1051,8 +1051,9 @@ pub mod account { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { + let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: rlp.list_at(0)?, + proof: proof?, nonce: rlp.val_at(1)?, balance: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, @@ -1198,8 +1199,9 @@ pub mod storage { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { + let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: rlp.list_at(0)?, + proof: proof?, value: rlp.val_at(1)?, }) } From 10a470a5fa7eb2f8102a7a741e6ab4f97ed4c5cd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 15:44:16 +0100 Subject: [PATCH 66/91] better bookkeeping of requests in light sync --- ethcore/light/src/net/context.rs | 2 ++ ethcore/light/src/on_demand/mod.rs | 16 +++++++++------- ethcore/light/src/types/request/mod.rs | 3 ++- sync/src/light_sync/mod.rs | 25 +++++++++++++++++++++++-- 4 files changed, 36 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 9eafead57..64ddd19a3 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -61,10 +61,12 @@ impl<'a> IoContext for NetworkContext<'a> { 
} fn disconnect_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disconnect of peer {}", peer); NetworkContext::disconnect_peer(self, peer); } fn disable_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disable of peer {}", peer); NetworkContext::disable_peer(self, peer); } diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index f658a1f2c..bc1ba4fb7 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -424,6 +424,8 @@ impl Handler for OnDemand { } }; + trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind()); + // handle the response appropriately for the request. // all branches which do not return early lead to disabling of the peer // due to misbehavior. @@ -443,7 +445,7 @@ impl Handler for OnDemand { } return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -456,7 +458,7 @@ impl Handler for OnDemand { let _ = sender.send(header); return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -469,7 +471,7 @@ impl Handler for OnDemand { let _ = sender.send(block); return } - Err(e) => warn!("Error handling response for block request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e), } } } @@ -482,7 +484,7 @@ impl Handler for OnDemand { let _ = sender.send(receipts); return } - Err(e) => warn!("Error handling response for receipts request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e), } } } @@ -495,7 +497,7 @@ impl Handler for OnDemand { let _ = sender.send(maybe_account); return } - Err(e) => warn!("Error handling response for state request: {:?}", e), 
+ Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), } } } @@ -506,7 +508,7 @@ impl Handler for OnDemand { let _ = sender.send(response.code.clone()); return } - Err(e) => warn!("Error handling response for code request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e), } } } @@ -521,7 +523,7 @@ impl Handler for OnDemand { let _ = sender.send(Err(err)); return } - ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), + ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"), } } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 0e11b8a7f..062f5e445 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -435,7 +435,8 @@ impl Response { } } - fn kind(&self) -> Kind { + /// Inspect the kind of this response. + pub fn kind(&self) -> Kind { match *self { Response::Headers(_) => Kind::Headers, Response::HeaderProof(_) => Kind::HeaderProof, diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 18fe3c953..2bc179a21 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -32,7 +32,7 @@ //! announced blocks. //! - On bad block/response, punish peer and reset. 
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::mem; use std::sync::Arc; @@ -150,6 +150,19 @@ impl AncestorSearch { } } + fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch { + match self { + AncestorSearch::Awaiting(id, start, req) => { + if req_ids.iter().find(|&x| x == &id).is_some() { + AncestorSearch::Queued(start) + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } + fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch where F: FnMut(HeadersRequest) -> Option { @@ -209,6 +222,7 @@ pub struct LightSync { start_block_number: u64, best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. + pending_reqs: Mutex>, // requests from this handler. client: Arc, rng: Mutex, state: Mutex, @@ -271,7 +285,8 @@ impl Handler for LightSync { *state = match mem::replace(&mut *state, SyncState::Idle) { SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), + SyncState::AncestorSearch(search) => + SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)), SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), }; } @@ -321,6 +336,10 @@ impl Handler for LightSync { return } + if !self.pending_reqs.lock().remove(&req_id) { + return + } + let headers = match responses.get(0) { Some(&request::Response::Headers(ref response)) => &response.headers[..], Some(_) => { @@ -496,6 +515,7 @@ impl LightSync { for peer in &peer_ids { match ctx.request_from(*peer, request.clone()) { Ok(id) => { + self.pending_reqs.lock().insert(id.clone()); return Some(id) } Err(NetError::NoCredits) => {} @@ -529,6 +549,7 @@ impl LightSync { start_block_number: client.as_light_client().chain_info().best_block_number, best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), + pending_reqs: Mutex::new(HashSet::new()), client: client, rng: 
Mutex::new(try!(OsRng::new())), state: Mutex::new(SyncState::Idle), From 974f89d5bfd0d4d63312ddc308b76fe42bde04fc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 16:00:00 +0100 Subject: [PATCH 67/91] correct workaround for RLP issue --- ethcore/light/src/types/request/mod.rs | 35 ++++++++++---------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 062f5e445..4c55cdfaf 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -738,12 +738,10 @@ pub mod header_proof { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.hash).append(&self.td); + s.begin_list(3) + .append_list::,_>(&self.proof[..]) + .append(&self.hash) + .append(&self.td); } } } @@ -1052,9 +1050,8 @@ pub mod account { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: proof?, + proof: rlp.list_at(0)?, nonce: rlp.val_at(1)?, balance: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, @@ -1065,12 +1062,9 @@ pub mod account { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(5).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.nonce) + s.begin_list(5) + .append_list::,_>(&self.proof[..]) + .append(&self.nonce) .append(&self.balance) .append(&self.code_hash) .append(&self.storage_root); @@ -1200,9 +1194,8 @@ pub mod storage { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: proof?, + proof: rlp.list_at(0)?, value: rlp.val_at(1)?, }) } @@ -1210,11 +1203,9 @@ pub mod 
storage { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - s.append(&self.value); + s.begin_list(2) + .append_list::,_>(&self.proof[..]) + .append(&self.value); } } } @@ -1543,7 +1534,7 @@ mod tests { let full_req = Request::HeaderProof(req.clone()); let res = HeaderProofResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], hash: Default::default(), td: 100.into(), }; From 5700f4ac8186dd959d64f68791055774fd59c902 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 18:31:16 +0100 Subject: [PATCH 68/91] fix block response decoding --- ethcore/light/src/net/mod.rs | 2 +- ethcore/light/src/on_demand/mod.rs | 8 ++++++-- ethcore/light/src/types/request/mod.rs | 23 +++++++++++++++++------ rpc/src/v1/impls/light/eth.rs | 1 - 4 files changed, 24 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 56b078b3b..4df83bf1c 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -407,7 +407,7 @@ impl LightProtocol { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response from peer {}", peer); + trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer); let peers = self.peers.read(); let res = match peers.get(peer) { diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index bc1ba4fb7..279d7e2ac 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -37,7 +37,7 @@ use rlp::RlpStream; use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; +use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use 
cache::Cache; use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; @@ -303,17 +303,21 @@ impl OnDemand { let complete = builder.build(); + let kind = complete.requests()[0].kind(); for (id, peer) in self.peers.read().iter() { if !peer.can_handle(&pending) { continue } match ctx.request_from(*id, complete.clone()) { Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); + trace!(target: "on_demand", "{}: Assigned {:?} to peer {}", + req_id, kind, id); + self.pending_requests.write().insert( req_id, pending, ); return } + Err(net::Error::NoCredits) => {} Err(e) => trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 4c55cdfaf..aab892270 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -244,7 +244,8 @@ pub enum CompleteRequest { } impl Request { - fn kind(&self) -> Kind { + /// Get the request kind. + pub fn kind(&self) -> Kind { match *self { Request::Headers(_) => Kind::Headers, Request::HeaderProof(_) => Kind::HeaderProof, @@ -727,7 +728,6 @@ pub mod header_proof { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { proof: rlp.list_at(0)?, hash: rlp.val_at(1)?, @@ -825,7 +825,6 @@ pub mod block_receipts { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { receipts: rlp.as_list()?, }) @@ -922,8 +921,8 @@ pub mod block_body { use ethcore::transaction::UnverifiedTransaction; // check body validity. - let _: Vec = rlp.list_at(0)?; - let _: Vec = rlp.list_at(1)?; + let _: Vec = rlp.list_at(0)?; + let _: Vec = rlp.list_at(1)?; Ok(Response { body: encoded::Body::new(rlp.as_raw().to_owned()), @@ -1480,9 +1479,16 @@ mod tests { fn check_roundtrip(val: T) where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug { + // check as single value. 
let bytes = ::rlp::encode(&val); let new_val: T = ::rlp::decode(&bytes); assert_eq!(val, new_val); + + // check as list containing single value. + let list = [val]; + let bytes = ::rlp::encode_list(&list); + let new_list: Vec = ::rlp::decode_list(&bytes); + assert_eq!(&list, &new_list[..]); } #[test] @@ -1566,6 +1572,7 @@ mod tests { #[test] fn body_roundtrip() { + use ethcore::transaction::{Transaction, UnverifiedTransaction}; let req = IncompleteBodyRequest { hash: Field::Scalar(Default::default()), }; @@ -1573,8 +1580,12 @@ mod tests { let full_req = Request::Body(req.clone()); let res = BodyResponse { body: { + let header = ::ethcore::header::Header::default(); + let tx = UnverifiedTransaction::from(Transaction::default().fake_sign(Default::default())); let mut stream = RlpStream::new_list(2); - stream.begin_list(0).begin_list(0); + stream.begin_list(2).append(&tx).append(&tx) + .begin_list(1).append(&header); + ::ethcore::encoded::Body::new(stream.out()) }, }; diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index ba32bf35c..1851f479e 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -166,7 +166,6 @@ impl EthClient { fn proved_execution(&self, req: CallRequest, num: Trailing) -> BoxFuture { const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); - let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone()); let req: CRequest = req.into(); let id = num.0.into(); From 45c0a971426f83da7dc0a9772e426b13f147f69a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 18:49:26 +0100 Subject: [PATCH 69/91] fix body encoding --- ethcore/light/src/on_demand/mod.rs | 4 +++- ethcore/light/src/on_demand/request.rs | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 279d7e2ac..a7c1ba2c4 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ 
b/ethcore/light/src/on_demand/mod.rs @@ -235,8 +235,10 @@ impl OnDemand { match { self.cache.lock().block_body(&req.hash) } { Some(body) => { let mut stream = RlpStream::new_list(3); + let body = body.rlp(); stream.append_raw(&req.header.into_inner(), 1); - stream.append_raw(&body.into_inner(), 2); + stream.append_raw(&body.at(0).as_raw(), 1); + stream.append_raw(&body.at(1).as_raw(), 1); sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index cda1d6feb..30337cc2c 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -151,7 +151,8 @@ impl Body { // concatenate the header and the body. let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(&body.rlp().as_raw(), 2); + stream.append_raw(body.rlp().at(0).as_raw(), 1); + stream.append_raw(body.rlp().at(1).as_raw(), 1); Ok(encoded::Block::new(stream.out())) } From b76860fd2b700091c85d18bb9a5a68339fe0440c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 19:42:11 +0100 Subject: [PATCH 70/91] add signing RPC methods in light cli --- parity/rpc_apis.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 76e54ff81..c53e2c4a1 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -313,6 +313,20 @@ impl Dependencies for LightDependencies { self.transaction_queue.clone(), ); + macro_rules! 
add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = dispatcher.clone(); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) + } + } + } + } + for api in apis { match *api { Api::Web3 => { @@ -332,7 +346,9 @@ impl Dependencies for LightDependencies { ); handler.extend_with(client.to_delegate()); - // TODO: filters and signing methods. + // TODO: filters. + add_signing_methods!(EthSigning, handler, self); + }, Api::Personal => { handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); @@ -355,9 +371,8 @@ impl Dependencies for LightDependencies { self.dapps_port, ).to_delegate()); - // TODO - //add_signing_methods!(EthSigning, handler, self); - //add_signing_methods!(ParitySigning, handler, self); + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); }, Api::ParityAccounts => { handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); From e0a79699eaa08f7a434f906b2ae262caa6122d2d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 20:02:46 +0100 Subject: [PATCH 71/91] transaction propagation on a timer --- ethcore/light/src/net/mod.rs | 51 +++++++++++++++++++++++++++++++++++- parity/rpc_apis.rs | 1 - sync/src/api.rs | 2 +- 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 4df83bf1c..e32e92145 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -27,7 +27,7 @@ use util::hash::H256; use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; -use std::collections::HashMap; +use 
std::collections::{HashMap, HashSet}; use std::fmt; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -61,6 +61,9 @@ const TIMEOUT_INTERVAL_MS: u64 = 1000; const TICK_TIMEOUT: TimerToken = 1; const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000; +const PROPAGATE_TIMEOUT: TimerToken = 2; +const PROPAGATE_TIMEOUT_INTERVAL_MS: u64 = 5000; + // minimum interval between updates. const UPDATE_INTERVAL_MS: i64 = 5000; @@ -132,6 +135,7 @@ pub struct Peer { last_update: SteadyTime, pending_requests: RequestSet, failed_requests: Vec, + propagated_transactions: HashSet, } /// A light protocol event handler. @@ -499,6 +503,47 @@ impl LightProtocol { } } + // propagate transactions to relay peers. + // if we aren't on the mainnet, we just propagate to all relay peers + fn propagate_transactions(&self, io: &IoContext) { + if self.capabilities.read().tx_relay { return } + + let ready_transactions = self.provider.ready_transactions(); + if ready_transactions.is_empty() { return } + + trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len()); + + let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect(); + let mut buf = Vec::new(); + + let peers = self.peers.read(); + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); + if !peer_info.capabilities.tx_relay { continue } + + let prop_filter = &mut peer_info.propagated_transactions; + *prop_filter = &*prop_filter & &all_transaction_hashes; + + // fill the buffer with all non-propagated transactions. + let to_propagate = ready_transactions.iter() + .filter(|tx| prop_filter.insert(tx.hash())) + .map(|tx| &tx.transaction); + + buf.extend(to_propagate); + + // propagate to the given peer. + if buf.is_empty() { continue } + io.send(*peer_id, packet::SEND_TRANSACTIONS, { + let mut stream = RlpStream::new_list(buf.len()); + for pending_tx in buf.drain(..) 
{ + stream.append(pending_tx); + } + + stream.out() + }) + } + } + /// called when a peer connects. pub fn on_connect(&self, peer: &PeerId, io: &IoContext) { let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) { @@ -613,6 +658,7 @@ impl LightProtocol { last_update: pending.last_update, pending_requests: RequestSet::default(), failed_requests: Vec::new(), + propagated_transactions: HashSet::new(), })); for handler in &self.handlers { @@ -797,6 +843,8 @@ impl NetworkProtocolHandler for LightProtocol { .expect("Error registering sync timer."); io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS) .expect("Error registering sync timer."); + io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL_MS) + .expect("Error registering sync timer."); } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -815,6 +863,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), + PROPAGATE_TIMEOUT => self.propagate_transactions(io), _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index c53e2c4a1..5cfb28474 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -348,7 +348,6 @@ impl Dependencies for LightDependencies { // TODO: filters. 
add_signing_methods!(EthSigning, handler, self); - }, Api::Personal => { handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); diff --git a/sync/src/api.rs b/sync/src/api.rs index bfa33e7b8..3e3234d84 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -414,7 +414,7 @@ struct TxRelay(Arc); impl LightHandler for TxRelay { fn on_transactions(&self, ctx: &EventContext, relay: &[::ethcore::transaction::UnverifiedTransaction]) { - trace!(target: "les", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); + trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx).to_vec()).collect(), ctx.peer()) } } From 3708b3be63566fc0d210cbd687e2a561633e1f08 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 20:49:17 +0100 Subject: [PATCH 72/91] fix RPC tests --- rpc/src/v1/tests/mocked/parity.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index 3af627037..28ff8ff5c 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -302,7 +302,7 @@ fn rpc_parity_net_peers() { let io = deps.default_client(); let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#; - let response = 
r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"les":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"les":null}}]},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } From a78068cbe966f729b85c1ad0cdedac6ec516a912 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 22:20:00 +0100 Subject: [PATCH 73/91] queue culling and informant --- Cargo.lock | 1 + Cargo.toml | 1 + ethcore/light/src/client/service.rs | 9 ++- parity/light_helpers/mod.rs | 21 ++++++ parity/light_helpers/queue_cull.rs | 99 +++++++++++++++++++++++++++++ parity/main.rs | 2 + parity/run.rs | 25 +++++++- 7 files changed, 154 insertions(+), 4 deletions(-) create mode 100644 parity/light_helpers/mod.rs create mode 100644 parity/light_helpers/queue_cull.rs diff --git a/Cargo.lock b/Cargo.lock index d572dcf79..7e0302e14 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,7 @@ dependencies = [ "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", diff --git a/Cargo.toml b/Cargo.toml index 8420c5459..66c8674df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ toml = "0.2" serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" +futures = "0.1" fdlimit = "0.1" ws2_32-sys = "0.2" hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 89538fec2..55795d870 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -50,7 +50,7 @@ impl fmt::Display for Error { /// Light client service. pub struct Service { client: Arc, - _io_service: IoService, + io_service: IoService, } impl Service { @@ -82,10 +82,15 @@ impl Service { io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { client: client, - _io_service: io_service, + io_service: io_service, }) } + /// Register an I/O handler on the service. + pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { + self.io_service.register_handler(handler) + } + /// Get a handle to the client. pub fn client(&self) -> &Arc { &self.client diff --git a/parity/light_helpers/mod.rs b/parity/light_helpers/mod.rs new file mode 100644 index 000000000..488f970c2 --- /dev/null +++ b/parity/light_helpers/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Utilities and helpers for the light client. + +mod queue_cull; + +pub use self::queue_cull::QueueCull; diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs new file mode 100644 index 000000000..10865d485 --- /dev/null +++ b/parity/light_helpers/queue_cull.rs @@ -0,0 +1,99 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Service for culling the light client's transaction queue. 
+ +use std::sync::Arc; +use std::time::Duration; + +use ethcore::service::ClientIoMessage; +use ethsync::LightSync; +use io::{IoContext, IoHandler, TimerToken}; + +use light::client::Client; +use light::on_demand::{request, OnDemand}; +use light::TransactionQueue; + +use futures::{future, stream, Future, Stream}; + +use parity_reactor::Remote; + +use util::RwLock; + +// Attepmt to cull once every 10 minutes. +const TOKEN: TimerToken = 1; +const TIMEOUT_MS: u64 = 1000 * 60 * 10; + +// But make each attempt last only 9 minutes +const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9; + +/// Periodically culls the transaction queue of mined transactions. +pub struct QueueCull { + /// A handle to the client, for getting the latest block header. + pub client: Arc, + /// A handle to the sync service. + pub sync: Arc, + /// The on-demand request service. + pub on_demand: Arc, + /// The transaction queue. + pub txq: Arc>, + /// Event loop remote. + pub remote: Remote, +} + +impl IoHandler for QueueCull { + fn initialize(&self, io: &IoContext) { + io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer"); + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer != TOKEN { return } + + let senders = self.txq.read().queued_senders(); + if senders.is_empty() { return } + + let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); + let best_header = self.client.best_block_header(); + let start_nonce = self.client.engine().account_start_nonce(); + + info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); + self.remote.spawn_with_timeout(move || { + let maybe_fetching = sync.with_context(move |ctx| { + // fetch the nonce of each sender in the queue. 
+ let nonce_futures = senders.iter() + .map(|&address| request::Account { header: best_header.clone(), address: address }) + .map(|request| on_demand.account(ctx, request)) + .map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce))) + .zip(senders.iter()) + .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); + + // as they come in, update each sender to the new nonce. + stream::futures_unordered(nonce_futures) + .fold(txq, |txq, (address, nonce)| { + txq.write().cull(address, nonce); + future::ok(txq) + }) + .map(|_| ()) // finally, discard the txq handle and log errors. + .map_err(|_| debug!(target: "cull", "OnDemand prematurely closed channel.")) + }); + + match maybe_fetching { + Some(fut) => fut.boxed(), + None => future::ok(()).boxed(), + } + }, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) + } +} diff --git a/parity/main.rs b/parity/main.rs index 2044b3ee0..cde0e6c1f 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -28,6 +28,7 @@ extern crate ctrlc; extern crate docopt; extern crate env_logger; extern crate fdlimit; +extern crate futures; extern crate hyper; extern crate isatty; extern crate jsonrpc_core; @@ -101,6 +102,7 @@ mod deprecated; mod dir; mod helpers; mod informant; +mod light_helpers; mod migration; mod modules; mod params; diff --git a/parity/run.rs b/parity/run.rs index b3d9bf90b..cf7d5e82c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -238,12 +238,24 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> }; let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; let light_sync = Arc::new(light_sync); - light_sync.start_network(); - // start RPCs. // spin up event loop let event_loop = EventLoop::spawn(); + // queue cull service. 
+ let queue_cull = Arc::new(::light_helpers::QueueCull { + client: service.client().clone(), + sync: light_sync.clone(), + on_demand: on_demand.clone(), + txq: txq.clone(), + remote: event_loop.remote(), + }); + + service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?; + + // start the network. + light_sync.start_network(); + // fetch service let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; @@ -253,6 +265,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> let rpc_stats = Arc::new(informant::RpcStats::default()); let signer_path = cmd.signer_conf.signer_path.clone(); + // start RPCs let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { signer_service: Arc::new(rpc_apis::SignerService::new(move || { signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) @@ -299,6 +312,14 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> // TODO: Dapps + // minimal informant thread. Just prints block number every 5 seconds. + // TODO: integrate with informant.rs + let informant_client = service.client().clone(); + ::std::thread::spawn(move || loop { + info!("#{}", informant_client.best_block_header().number()); + ::std::thread::sleep(::std::time::Duration::from_secs(5)); + }); + // wait for ctrl-c. 
Ok(wait_for_exit(panic_handler, None, None, can_restart)) } From ac057ebe935505a706f2aa733885e469dc2c656c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 22:36:15 +0100 Subject: [PATCH 74/91] fix test build --- ethcore/light/src/net/tests/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 67dfe8131..6dc5fbe7e 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -612,6 +612,7 @@ fn id_guard() { last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, failed_requests: Vec::new(), + propagated_transactions: Default::default(), })); // first, malformed responses. From 2f757babb9d29607682fcb5a8c1f6af18033281c Mon Sep 17 00:00:00 2001 From: Vadim Sloun Date: Thu, 30 Mar 2017 23:16:54 +0300 Subject: [PATCH 75/91] fix for Ubuntu Dockerfile --- docker/ubuntu/Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile index 475555be9..0ee84e1c5 100644 --- a/docker/ubuntu/Dockerfile +++ b/docker/ubuntu/Dockerfile @@ -8,7 +8,10 @@ RUN apt-get update && \ curl \ git \ file \ - binutils + binutils \ + libssl-dev \ + pkg-config \ + libudev-dev # install rustup RUN curl https://sh.rustup.rs -sSf | sh -s -- -y From 6a05967bef1f9abcf548b4c9f9bff1aefb3a1ac2 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Mon, 3 Apr 2017 10:25:21 +0200 Subject: [PATCH 76/91] trigger js build release (#5379) --- js/scripts/test.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/js/scripts/test.js b/js/scripts/test.js index f5bfb0835..e426642db 100644 --- a/js/scripts/test.js +++ b/js/scripts/test.js @@ -1,2 +1 @@ -// test script 9 -// trigger rebuild on master 15 Mar 2017, 11:19 +// test script 10 From 2df4532d5078ef5cac8ccc61774d4cb8b3ba5438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 3 Apr 2017 10:27:37 +0200 
Subject: [PATCH 77/91] Dapps and RPC server merge (#5365) * Dapps server as a middleware. * Dapps RPC - Work in Progress * Merging Dapps and RPC server. * Fast HTTP server configuration. * Bump jsonrpc * Fixing test target * Re-implementing commented-out tests. --- Cargo.lock | 148 +++++++--- Cargo.toml | 13 +- dapps/Cargo.toml | 7 +- dapps/src/api/api.rs | 40 +-- dapps/src/apps/fetcher/mod.rs | 10 +- dapps/src/endpoint.rs | 3 +- dapps/src/handlers/auth.rs | 44 --- dapps/src/handlers/mod.rs | 2 - dapps/src/lib.rs | 358 +++++-------------------- dapps/src/{router/mod.rs => router.rs} | 130 +++------ dapps/src/router/auth.rs | 106 -------- dapps/src/router/host_validation.rs | 42 --- dapps/src/rpc.rs | 4 +- dapps/src/tests/api.rs | 25 +- dapps/src/tests/authorization.rs | 80 ------ dapps/src/tests/helpers/mod.rs | 212 ++++++++++++--- dapps/src/tests/mod.rs | 1 - dapps/src/tests/rpc.rs | 73 +---- dapps/src/tests/validation.rs | 30 +-- hash-fetch/src/urlhint.rs | 3 +- ipfs/src/lib.rs | 3 +- parity/cli/mod.rs | 51 ++-- parity/cli/usage.txt | 30 +-- parity/configuration.rs | 63 ++--- parity/dapps.rs | 149 +++------- parity/deprecated.rs | 125 +++++---- parity/ipfs.rs | 3 +- parity/main.rs | 7 +- parity/rpc.rs | 133 +++++---- parity/rpc_apis.rs | 2 +- parity/run.rs | 43 ++- rpc/Cargo.toml | 1 + rpc/src/lib.rs | 100 ++++++- rpc/src/metadata.rs | 74 +++++ scripts/targets.sh | 2 +- 35 files changed, 869 insertions(+), 1248 deletions(-) delete mode 100644 dapps/src/handlers/auth.rs rename dapps/src/{router/mod.rs => router.rs} (71%) delete mode 100644 dapps/src/router/auth.rs delete mode 100644 dapps/src/router/host_validation.rs delete mode 100644 dapps/src/tests/authorization.rs create mode 100644 rpc/src/metadata.rs diff --git a/Cargo.lock b/Cargo.lock index 8a70f35a2..83e643900 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,7 +10,6 @@ dependencies = [ "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", - "ethcore-dapps 1.7.0", "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", @@ -27,12 +26,12 @@ dependencies = [ "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps 1.7.0", "parity-hash-fetch 1.7.0", "parity-ipfs-api 1.7.0", "parity-local-store 0.1.0", @@ -40,6 +39,7 @@ dependencies = [ "parity-rpc-client 1.4.0", "parity-updater 1.7.0", "path 0.1.0", + "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -309,6 +309,11 @@ dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "difference" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "docopt" version = "0.7.0" @@ -446,40 +451,6 @@ dependencies = [ "siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ethcore-dapps" -version = "1.7.0" -dependencies = [ - "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.7.0", - "ethcore-rpc 1.7.0", - "ethcore-util 1.7.0", - "fetch 0.1.0", - 
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-hash-fetch 1.7.0", - "parity-reactor 0.1.0", - "parity-ui 1.7.0", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "ethcore-devtools" version = "1.7.0" @@ -642,6 +613,7 @@ dependencies = [ "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -1088,7 +1060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1100,7 +1072,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1113,7 +1085,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1125,17 +1097,31 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-minihttp-server" +version = "7.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1145,7 +1131,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1156,7 +1142,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1604,6 +1590,38 @@ dependencies = [ "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-dapps" +version = "1.7.0" +dependencies = [ + "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-devtools 1.7.0", + "ethcore-util 1.7.0", + "fetch 0.1.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-hash-fetch 1.7.0", + "parity-reactor 0.1.0", + "parity-ui 1.7.0", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-dapps-glue" version = "1.7.0" @@ -1826,6 +1844,14 @@ name = "podio" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pretty_assertions" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "primal" version = "0.2.3" @@ -2431,6 +2457,21 @@ dependencies = [ "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-minihttp" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-minihttp#8acbafae3e77e7f7eb516b441ec84695580221dd" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-named-pipes" version = "0.1.0" @@ 
-2441,6 +2482,22 @@ dependencies = [ "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-proto" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-proto" version = "0.1.0" @@ -2706,6 +2763,7 @@ dependencies = [ "checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" +"checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8" "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" @@ -2739,6 +2797,7 @@ dependencies = [ "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" @@ -2802,6 +2861,7 @@ dependencies = [ "checksum phf_shared 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "fee4d039930e4f45123c9b15976cf93a499847b6483dc09c42ea0ec4940f2aa6" "checksum pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8cee804ecc7eaf201a4a207241472cc870e825206f6c031e3ee2a72fa425f2fa" "checksum podio 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e5422a1ee1bc57cc47ae717b0137314258138f38fd5f3cea083f43a9725383a0" +"checksum pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2412f3332a07c7a2a50168988dcc184f32180a9758ad470390e5f55e089f6b6e" "checksum primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0e31b86efadeaeb1235452171a66689682783149a6249ff334a2c5d8218d00a4" "checksum primal-bit 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "464a91febc06166783d4f5ba3577b5ed8dda8e421012df80bfe48a971ed7be8f" "checksum primal-check 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "647c81b67bb9551a7b88d0bcd785ac35b7d0bf4b2f358683d7c2375d04daec51" @@ -2871,7 +2931,9 @@ 
dependencies = [ "checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b" "checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" "checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "" +"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "" "checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "" +"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" diff --git a/Cargo.toml b/Cargo.toml index 737c21b09..b82490e88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ serde_json = "0.9" app_dirs = "1.1.1" fdlimit = "0.1" ws2_32-sys = "0.2" -hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethsync = { path = "sync" } @@ -50,8 +49,9 @@ parity-ipfs-api = { path = "ipfs" } parity-updater = { path = "updater" } parity-reactor = { path = "util/reactor" } parity-local-store = { path = "local-store" } -ethcore-dapps = { path = "dapps", optional = true } path = { path = "util/path" } + +parity-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} ethcore-secretstore = { path = 
"secret_store", optional = true } @@ -60,6 +60,7 @@ rustc_version = "0.2" [dev-dependencies] ethcore-ipc-tests = { path = "ipc/tests" } +pretty_assertions = "0.1" [target.'cfg(windows)'.dependencies] winapi = "0.2" @@ -71,18 +72,18 @@ daemonize = "0.2" default = ["ui-precompiled"] ui = [ "dapps", - "ethcore-dapps/ui", + "parity-dapps/ui", "ethcore-signer/ui", ] ui-precompiled = [ "dapps", "ethcore-signer/ui-precompiled", - "ethcore-dapps/ui-precompiled", + "parity-dapps/ui-precompiled", ] -dapps = ["ethcore-dapps"] +dapps = ["parity-dapps"] ipc = ["ethcore/ipc", "ethsync/ipc"] jit = ["ethcore/jit"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] test-heavy = ["ethcore/test-heavy"] ethkey-cli = ["ethcore/ethkey-cli"] diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 07f136d78..429ed01f5 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "Parity Dapps crate" -name = "ethcore-dapps" +name = "parity-dapps" version = "1.7.0" license = "GPL-3.0" authors = ["Parity Technologies "] @@ -28,11 +28,8 @@ zip = { version = "0.1", default-features = false } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -# TODO [ToDr] Temporary solution, server should be merged with RPC. 
-jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-devtools = { path = "../devtools" } -ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } fetch = { path = "../util/fetch" } parity-hash-fetch = { path = "../hash-fetch" } @@ -42,7 +39,7 @@ parity-ui = { path = "./ui" } clippy = { version = "0.0.103", optional = true} [features] -dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"] +dev = ["clippy", "ethcore-util/dev"] ui = ["parity-ui/no-precompiled-js"] ui-precompiled = ["parity-ui/use-precompiled-js"] diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index e07bd4535..df3386358 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::sync::Arc; use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::header; @@ -26,48 +25,49 @@ use apps::fetcher::Fetcher; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server; -use jsonrpc_server_utils::cors; +use jsonrpc_http_server::{self, AccessControlAllowOrigin}; #[derive(Clone)] -pub struct RestApi { - cors_domains: Option>, - endpoints: Arc, - fetcher: Arc, +pub struct RestApi { + // TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic. + // RequestMiddleware should be able to tell that cors headers should be included. 
+ cors_domains: Option>, + apps: Vec, + fetcher: F, } -impl RestApi { - pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { +impl RestApi { + pub fn new(cors_domains: Vec, endpoints: &Endpoints, fetcher: F) -> Box { Box::new(RestApi { cors_domains: Some(cors_domains), - endpoints: endpoints, + apps: Self::list_apps(endpoints), fetcher: fetcher, }) } - fn list_apps(&self) -> Vec { - self.endpoints.iter().filter_map(|(ref k, ref e)| { + fn list_apps(endpoints: &Endpoints) -> Vec { + endpoints.iter().filter_map(|(ref k, ref e)| { e.info().map(|ref info| App::from_info(k, info)) }).collect() } } -impl Endpoint for RestApi { +impl Endpoint for RestApi { fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { - Box::new(RestApiRouter::new(self.clone(), path, control)) + Box::new(RestApiRouter::new((*self).clone(), path, control)) } } -struct RestApiRouter { - api: RestApi, +struct RestApiRouter { + api: RestApi, cors_header: Option, path: Option, control: Option, handler: Box, } -impl RestApiRouter { - fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { +impl RestApiRouter { + fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), cors_header: None, @@ -114,7 +114,7 @@ impl RestApiRouter { } } -impl server::Handler for RestApiRouter { +impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); @@ -142,7 +142,7 @@ impl server::Handler for RestApiRouter { if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() } let handler = endpoint.and_then(|v| match v { - "apps" => Some(response::as_json(&self.api.list_apps())), + "apps" => Some(response::as_json(&self.api.apps)), "ping" => Some(response::ping()), "content" => self.resolve_content(hash, path, control), _ => None diff --git a/dapps/src/apps/fetcher/mod.rs 
b/dapps/src/apps/fetcher/mod.rs index c2607fe43..a824134cb 100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -47,7 +47,8 @@ pub trait Fetcher: Send + Sync + 'static { fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box; } -pub struct ContentFetcher { +#[derive(Clone)] +pub struct ContentFetcher { dapps_path: PathBuf, resolver: R, cache: Arc>, @@ -57,14 +58,14 @@ pub struct ContentFetcher Drop for ContentFetcher { +impl Drop for ContentFetcher { fn drop(&mut self) { // Clear cache path let _ = fs::remove_dir_all(&self.dapps_path); } } -impl ContentFetcher { +impl ContentFetcher { pub fn new(resolver: R, sync_status: Arc, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self { let mut dapps_path = env::temp_dir(); @@ -97,7 +98,7 @@ impl ContentFetcher { } } -impl Fetcher for ContentFetcher { +impl Fetcher for ContentFetcher { fn contains(&self, content_id: &str) -> bool { { let mut cache = self.cache.lock(); @@ -233,6 +234,7 @@ mod tests { use page::LocalPageEndpoint; use super::{ContentFetcher, Fetcher}; + #[derive(Clone)] struct FakeResolver; impl URLHint for FakeResolver { fn resolve(&self, _id: Bytes) -> Option { diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 648d82ff8..ea5825b74 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -16,9 +16,10 @@ //! URL Endpoint traits -use hyper::{self, server, net}; use std::collections::BTreeMap; +use hyper::{self, server, net}; + #[derive(Debug, PartialEq, Default, Clone)] pub struct EndpointPath { pub app_id: String, diff --git a/dapps/src/handlers/auth.rs b/dapps/src/handlers/auth.rs deleted file mode 100644 index db6018e0d..000000000 --- a/dapps/src/handlers/auth.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Authorization Handlers - -use hyper::{server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::status::StatusCode; - -pub struct AuthRequiredHandler; - -impl server::Handler for AuthRequiredHandler { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(StatusCode::Unauthorized); - res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]); - Next::write() - } - - fn on_response_writable(&mut self, _encoder: &mut Encoder) -> Next { - Next::end() - } -} - diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index cec7be631..3e2daf462 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -16,14 +16,12 @@ //! Hyper handlers implementations. 
-mod auth; mod content; mod echo; mod fetch; mod redirect; mod streaming; -pub use self::auth::AuthRequiredHandler; pub use self::content::ContentHandler; pub use self::echo::EchoHandler; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse}; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 252e1c3bb..60aba30a4 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -34,9 +34,7 @@ extern crate zip; extern crate jsonrpc_core; extern crate jsonrpc_http_server; -extern crate jsonrpc_server_utils; -extern crate ethcore_rpc; extern crate ethcore_util as util; extern crate fetch; extern crate parity_dapps_glue as parity_dapps; @@ -61,7 +59,6 @@ mod apps; mod page; mod router; mod handlers; -mod rpc; mod api; mod proxypac; mod url; @@ -69,23 +66,16 @@ mod web; #[cfg(test)] mod tests; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; -use std::net::SocketAddr; use std::collections::HashMap; -use jsonrpc_core::{Middleware, MetaIoHandler}; -use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; -pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; -pub use jsonrpc_http_server::hyper; +use jsonrpc_http_server::{self as http, hyper, AccessControlAllowOrigin}; -use ethcore_rpc::Metadata; -use fetch::{Fetch, Client as FetchClient}; -use hash_fetch::urlhint::ContractClient; +use fetch::Fetch; use parity_reactor::Remote; -use router::auth::{Authorization, NoAuth, HttpBasicAuth}; -use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; +pub use hash_fetch::urlhint::ContractClient; /// Indicates sync status pub trait SyncStatus: Send + Sync { @@ -107,296 +97,92 @@ impl WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync { fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) } } -/// Webapps HTTP+RPC server build. 
-pub struct ServerBuilder { - dapps_path: PathBuf, - extra_dapps: Vec, - registrar: Arc, - sync_status: Arc, - web_proxy_tokens: Arc, - signer_address: Option<(String, u16)>, - allowed_hosts: Option>, - extra_cors: Option>, - remote: Remote, - fetch: Option, +/// Dapps server as `jsonrpc-http-server` request middleware. +pub struct Middleware { + router: router::Router>, } -impl ServerBuilder { - /// Construct new dapps server - pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { - ServerBuilder { - dapps_path: dapps_path.as_ref().to_owned(), - extra_dapps: vec![], - registrar: registrar, - sync_status: Arc::new(|| false), - web_proxy_tokens: Arc::new(|_| false), - signer_address: None, - allowed_hosts: Some(vec![]), - extra_cors: None, - remote: remote, - fetch: None, - } - } -} - -impl ServerBuilder { - /// Set a fetch client to use. - pub fn fetch(self, fetch: X) -> ServerBuilder { - ServerBuilder { - dapps_path: self.dapps_path, - extra_dapps: vec![], - registrar: self.registrar, - sync_status: self.sync_status, - web_proxy_tokens: self.web_proxy_tokens, - signer_address: self.signer_address, - allowed_hosts: self.allowed_hosts, - extra_cors: self.extra_cors, - remote: self.remote, - fetch: Some(fetch), - } - } - - /// Change default sync status. - pub fn sync_status(mut self, status: Arc) -> Self { - self.sync_status = status; - self - } - - /// Change default web proxy tokens validator. - pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { - self.web_proxy_tokens = tokens; - self - } - - /// Change default signer port. - pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { - self.signer_address = signer_address; - self - } - - /// Change allowed hosts. 
- /// `None` - All hosts are allowed - /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { - self.allowed_hosts = allowed_hosts.into(); - self - } - - /// Extra cors headers. - /// `None` - no additional CORS URLs - pub fn extra_cors_headers(mut self, cors: DomainsValidation) -> Self { - self.extra_cors = cors.into(); - self - } - - /// Change extra dapps paths (apart from `dapps_path`) - pub fn extra_dapps>(mut self, extra_dapps: &[P]) -> Self { - self.extra_dapps = extra_dapps.iter().map(|p| p.as_ref().to_owned()).collect(); - self - } - - /// Asynchronously start server with no authentication, - /// returns result with `Server` handle on success or an error. - pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - NoAuth, - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - /// Asynchronously start server with `HTTP Basic Authentication`, - /// return result with `Server` handle on success or an error. 
- pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - HttpBasicAuth::single_user(username, password), - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - fn fetch_client(&self) -> Result { - match self.fetch.clone() { - Some(fetch) => Ok(fetch), - None => T::new().map_err(|_| ServerError::FetchInitialization), - } - } -} - -/// Webapps HTTP server. -pub struct Server { - server: Option, -} - -impl Server { - /// Returns a list of allowed hosts or `None` if all hosts are allowed. - fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { - let mut allowed = Vec::new(); - - match hosts { - Some(hosts) => allowed.extend_from_slice(&hosts), - None => return None, - } - - // Add localhost domain as valid too if listening on loopback interface. - allowed.push(bind_address.replace("127.0.0.1", "localhost").into()); - allowed.push(bind_address.into()); - Some(allowed) - } - - /// Returns a list of CORS domains for API endpoint. - fn cors_domains( +impl Middleware { + /// Creates new Dapps server middleware. 
+ pub fn new( + remote: Remote, signer_address: Option<(String, u16)>, - extra_cors: Option>, - ) -> Vec { - let basic_cors = match signer_address { - Some(signer_address) => [ - format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("http://{}", address(&signer_address)), - format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("https://{}", address(&signer_address)), - ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), - None => vec![], - }; - - match extra_cors { - None => basic_cors, - Some(extra_cors) => basic_cors.into_iter().chain(extra_cors).collect(), - } - } - - fn start_http>( - addr: &SocketAddr, - hosts: Option>, - extra_cors: Option>, - authorization: A, - handler: MetaIoHandler, dapps_path: PathBuf, extra_dapps: Vec, - signer_address: Option<(String, u16)>, registrar: Arc, sync_status: Arc, web_proxy_tokens: Arc, - remote: Remote, - tokio_remote: TokioRemote, fetch: F, - ) -> Result { - let authorization = Arc::new(authorization); - let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( + ) -> Self { + let content_fetcher = apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status, signer_address.clone(), remote.clone(), fetch.clone(), - )); - let endpoints = Arc::new(apps::all_endpoints( + ); + let endpoints = apps::all_endpoints( dapps_path, extra_dapps, signer_address.clone(), web_proxy_tokens, remote.clone(), fetch.clone(), - )); - let cors_domains = Self::cors_domains(signer_address.clone(), extra_cors); + ); - let special = Arc::new({ + let cors_domains = cors_domains(signer_address.clone()); + + let special = { let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone())); - special.insert(router::SpecialEndpoint::Utils, apps::utils()); 
+ special.insert(router::SpecialEndpoint::Rpc, None); + special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); special.insert( router::SpecialEndpoint::Api, - api::RestApi::new(cors_domains, endpoints.clone(), content_fetcher.clone()) + Some(api::RestApi::new(cors_domains.clone(), &endpoints, content_fetcher.clone())), ); special - }); - let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); + }; - hyper::Server::http(addr)? - .handle(move |ctrl| router::Router::new( - ctrl, - signer_address.clone(), - content_fetcher.clone(), - endpoints.clone(), - special.clone(), - authorization.clone(), - hosts.clone(), - )) - .map(|(l, srv)| { + let router = router::Router::new( + signer_address, + content_fetcher, + endpoints, + special, + ); - ::std::thread::spawn(move || { - srv.run(); - }); - - Server { - server: Some(l), - } - }) - .map_err(ServerError::from) - } - - #[cfg(test)] - /// Returns address that this server is bound to. - pub fn addr(&self) -> &SocketAddr { - self.server.as_ref() - .expect("server is always Some at the start; it's consumed only when object is dropped; qed") - .addrs() - .first() - .expect("You cannot start the server without binding to at least one address; qed") - } -} - -impl Drop for Server { - fn drop(&mut self) { - self.server.take().unwrap().close() - } -} - -/// Webapp Server startup error -#[derive(Debug)] -pub enum ServerError { - /// Wrapped `std::io::Error` - IoError(std::io::Error), - /// Other `hyper` error - Other(hyper::error::Error), - /// Fetch service initialization error - FetchInitialization, -} - -impl From for ServerError { - fn from(err: hyper::error::Error) -> Self { - match err { - hyper::error::Error::Io(e) => ServerError::IoError(e), - e => ServerError::Other(e), + Middleware { + router: router, } } } +impl http::RequestMiddleware for Middleware { + fn on_request(&self, req: &hyper::server::Request, control: &hyper::Control) -> http::RequestMiddlewareAction { + self.router.on_request(req, control) 
+ } +} + +/// Returns a list of CORS domains for API endpoint. +fn cors_domains(signer_address: Option<(String, u16)>) -> Vec { + use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; + + match signer_address { + Some(signer_address) => [ + format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("http://{}", address(&signer_address)), + format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("https://{}", address(&signer_address)), + ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), + None => vec![], + } +} + +fn address(address: &(String, u16)) -> String { + format!("{}:{}", address.0, address.1) +} + /// Random filename fn random_filename() -> String { use ::rand::Rng; @@ -404,39 +190,18 @@ fn random_filename() -> String { rng.gen_ascii_chars().take(12).collect() } -fn address(address: &(String, u16)) -> String { - format!("{}:{}", address.0, address.1) -} - #[cfg(test)] mod util_tests { - use super::Server; + use super::cors_domains; use jsonrpc_http_server::AccessControlAllowOrigin; - #[test] - fn should_return_allowed_hosts() { - // given - let bind_address = "127.0.0.1".to_owned(); - - // when - let all = Server::allowed_hosts(None, bind_address.clone()); - let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone()); - let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone()); - - // then - assert_eq!(all, None); - assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()])); - assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()])); - } - #[test] fn should_return_cors_domains() { // given // when - let none = Server::cors_domains(None, None); - let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); - let extra = Server::cors_domains(None, Some(vec!["all".into()])); + let none = 
cors_domains(None); + let some = cors_domains(Some(("127.0.0.1".into(), 18180))); // then assert_eq!(none, Vec::::new()); @@ -448,6 +213,5 @@ mod util_tests { "https://parity.web3.site:18180".into(), "https://127.0.0.1:18180".into(), ]); - assert_eq!(extra, vec![AccessControlAllowOrigin::Any]); } } diff --git a/dapps/src/router/mod.rs b/dapps/src/router.rs similarity index 71% rename from dapps/src/router/mod.rs rename to dapps/src/router.rs index 0b4e632a6..995565f26 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router.rs @@ -15,24 +15,20 @@ // along with Parity. If not, see . //! Router implementation -//! Processes request handling authorization and dispatching it to proper application. - -pub mod auth; -mod host_validation; +//! Dispatch requests to proper application. use address; use std::cmp; -use std::sync::Arc; use std::collections::HashMap; use url::{Url, Host}; -use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; +use hyper::{self, server, header, Control, StatusCode}; use hyper::net::HttpStream; -use jsonrpc_server_utils::hosts; +use jsonrpc_http_server as http; use apps::{self, DAPPS_DOMAIN}; use apps::fetcher::Fetcher; -use endpoint::{Endpoint, Endpoints, EndpointPath}; +use endpoint::{Endpoint, Endpoints, EndpointPath, Handler}; use handlers::{self, Redirection, ContentHandler}; /// Special endpoints are accessible on every domain (every dapp) @@ -44,51 +40,29 @@ pub enum SpecialEndpoint { None, } -pub struct Router { - control: Option, +pub struct Router { signer_address: Option<(String, u16)>, - endpoints: Arc, - fetch: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - handler: Box + Send>, + endpoints: Endpoints, + fetch: F, + special: HashMap>>, } -impl server::Handler for Router { - - fn on_request(&mut self, req: server::Request) -> Next { +impl http::RequestMiddleware for Router { + fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { // 
Choose proper handler depending on path / domain - let url = handlers::extract_url(&req); + let url = handlers::extract_url(req); let endpoint = extract_endpoint(&url); - let referer = extract_referer_endpoint(&req); + let referer = extract_referer_endpoint(req); let is_utils = endpoint.1 == SpecialEndpoint::Utils; + let is_dapps_domain = endpoint.0.as_ref().map(|endpoint| endpoint.using_dapps_domains).unwrap_or(false); + let is_origin_set = req.headers().get::().is_some(); let is_get_request = *req.method() == hyper::Method::Get; trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); - // Validate Host header - trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts); - let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts); - if !is_valid { - debug!(target: "dapps", "Rejecting invalid host header."); - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); - } - - trace!(target: "dapps", "Checking authorization."); - // Check authorization - let auth = self.authorization.is_authorized(&req); - if let auth::Authorized::No(handler) = auth { - debug!(target: "dapps", "Authorization denied."); - self.handler = handler; - return self.handler.on_request(req); - } - - - let control = self.control.take().expect("on_request is called only once; control is always defined at start; qed"); + let control = control.clone(); debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); - self.handler = match (endpoint.0, endpoint.1, referer) { + let handler: Option> = match (endpoint.0, endpoint.1, referer) { // Handle invalid web requests that we can recover from (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) if referer.app_id == apps::WEB_PATH @@ -100,26 +74,27 @@ impl server::Handler for Router let len = cmp::min(referer_url.path.len(), 2); // /web// let base = referer_url.path[..len].join("/"); let requested = url.map(|u| 
u.path.join("/")).unwrap_or_default(); - Redirection::boxed(&format!("/{}/{}", base, requested)) + Some(Redirection::boxed(&format!("/{}/{}", base, requested))) }, // First check special endpoints (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { trace!(target: "dapps", "Resolving to special endpoint."); self.special.get(endpoint) .expect("special known to contain key; qed") - .to_async_handler(path.clone().unwrap_or_default(), control) + .as_ref() + .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) }, // Then delegate to dapp (Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); - self.endpoints.get(&path.app_id) + Some(self.endpoints.get(&path.app_id) .expect("endpoints known to contain key; qed") - .to_async_handler(path.clone(), control) + .to_async_handler(path.clone(), control)) }, // Try to resolve and fetch the dapp (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { trace!(target: "dapps", "Resolving to fetchable content."); - self.fetch.to_async_handler(path.clone(), control) + Some(self.fetch.to_async_handler(path.clone(), control)) }, // NOTE [todr] /home is redirected to home page since some users may have the redirection cached // (in the past we used 301 instead of 302) @@ -128,82 +103,61 @@ impl server::Handler for Router // 404 for non-existent content (Some(ref path), _, _) if is_get_request && path.app_id != "home" => { trace!(target: "dapps", "Resolving to 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Requested content was not found.", None, self.signer_address.clone(), - )) + ))) }, // Redirect any other GET request to signer. 
_ if is_get_request => { if let Some(ref signer_address) = self.signer_address { trace!(target: "dapps", "Redirecting to signer interface."); - Redirection::boxed(&format!("http://{}", address(signer_address))) + Some(Redirection::boxed(&format!("http://{}", address(signer_address)))) } else { trace!(target: "dapps", "Signer disabled, returning 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Your homepage is not available when Trusted Signer is disabled.", Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), self.signer_address.clone(), - )) + ))) } }, // RPC by default _ => { trace!(target: "dapps", "Resolving to RPC call."); - self.special.get(&SpecialEndpoint::Rpc) - .expect("RPC endpoint always stored; qed") - .to_async_handler(EndpointPath::default(), control) + None } }; - // Delegate on_request to proper handler - self.handler.on_request(req) - } - - /// This event occurs each time the `Request` is ready to be read from. - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - self.handler.on_request_readable(decoder) - } - - /// This event occurs after the first time this handled signals `Next::write()`. - fn on_response(&mut self, response: &mut server::Response) -> Next { - self.handler.on_response(response) - } - - /// This event occurs each time the `Response` is ready to be written to. 
- fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - self.handler.on_response_writable(encoder) + match handler { + Some(handler) => http::RequestMiddlewareAction::Respond { + should_validate_hosts: !(is_utils || is_dapps_domain), + handler: handler, + }, + None => http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: !is_origin_set, + }, + } } } -impl Router { +impl Router { pub fn new( - control: Control, signer_address: Option<(String, u16)>, - content_fetcher: Arc, - endpoints: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - ) -> Self { - - let handler = special.get(&SpecialEndpoint::Utils) - .expect("Utils endpoint always stored; qed") - .to_handler(EndpointPath::default()); + content_fetcher: F, + endpoints: Endpoints, + special: HashMap>>, + ) -> Self { Router { - control: Some(control), signer_address: signer_address, endpoints: endpoints, fetch: content_fetcher, special: special, - authorization: authorization, - allowed_hosts: allowed_hosts, - handler: handler, } } } diff --git a/dapps/src/router/auth.rs b/dapps/src/router/auth.rs deleted file mode 100644 index 007ebb96d..000000000 --- a/dapps/src/router/auth.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! 
HTTP Authorization implementations - -use std::collections::HashMap; -use hyper::{server, net, header, status}; -use endpoint::Handler; -use handlers::{AuthRequiredHandler, ContentHandler}; - -/// Authorization result -pub enum Authorized { - /// Authorization was successful. - Yes, - /// Unsuccessful authorization. Handler for further work is returned. - No(Box), -} - -/// Authorization interface -pub trait Authorization : Send + Sync { - /// Checks if authorization is valid. - fn is_authorized(&self, req: &server::Request)-> Authorized; -} - -/// HTTP Basic Authorization handler -pub struct HttpBasicAuth { - users: HashMap, -} - -/// No-authorization implementation (authorization disabled) -pub struct NoAuth; - -impl Authorization for NoAuth { - fn is_authorized(&self, _req: &server::Request)-> Authorized { - Authorized::Yes - } -} - -impl Authorization for HttpBasicAuth { - fn is_authorized(&self, req: &server::Request) -> Authorized { - let auth = self.check_auth(&req); - - match auth { - Access::Denied => { - Authorized::No(Box::new(ContentHandler::error( - status::StatusCode::Unauthorized, - "Unauthorized", - "You need to provide valid credentials to access this page.", - None, - None, - ))) - }, - Access::AuthRequired => { - Authorized::No(Box::new(AuthRequiredHandler)) - }, - Access::Granted => { - Authorized::Yes - }, - } - } -} - -#[derive(Debug)] -enum Access { - Granted, - Denied, - AuthRequired, -} - -impl HttpBasicAuth { - /// Creates `HttpBasicAuth` instance with only one user. 
- pub fn single_user(username: &str, password: &str) -> Self { - let mut users = HashMap::new(); - users.insert(username.to_owned(), password.to_owned()); - HttpBasicAuth { - users: users - } - } - - fn is_authorized(&self, username: &str, password: &str) -> bool { - self.users.get(&username.to_owned()).map_or(false, |pass| pass == password) - } - - fn check_auth(&self, req: &server::Request) -> Access { - match req.headers().get::>() { - Some(&header::Authorization( - header::Basic { ref username, password: Some(ref password) } - )) if self.is_authorized(username, password) => Access::Granted, - Some(_) => Access::Denied, - None => Access::AuthRequired, - } - } -} diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs deleted file mode 100644 index e5fcedd94..000000000 --- a/dapps/src/router/host_validation.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- - -use apps::DAPPS_DOMAIN; -use hyper::{server, header, StatusCode}; -use hyper::net::HttpStream; - -use handlers::ContentHandler; -use jsonrpc_http_server; -use jsonrpc_server_utils::hosts; - -pub fn is_valid(req: &server::Request, allowed_hosts: &Option>) -> bool { - let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts); - match (header_valid, req.headers().get::()) { - (true, _) => true, - (_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN), - _ => false, - } -} - -pub fn host_invalid_response() -> Box + Send> { - Box::new(ContentHandler::error(StatusCode::Forbidden, - "Current Host Is Disallowed", - "You are trying to access your node using incorrect address.", - Some("Use allowed URL or specify different hosts CLI options."), - None, - )) -} diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 6ddb31db0..b743408dc 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -66,14 +66,14 @@ impl> Endpoint for RpcEndpoint { #[derive(Default)] struct NoopMiddleware; impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &http::hyper::server::Request) -> http::RequestMiddlewareAction { + fn on_request(&self, request: &http::hyper::server::Request, _control: &http::hyper::Control) -> http::RequestMiddlewareAction { http::RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors: request.headers().get::().is_none(), } } } -struct MetadataExtractor; +pub struct MetadataExtractor; impl HttpMetaExtractor for MetadataExtractor { fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { let dapp_id = request.headers().get::() diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 73467e854..043814377 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use tests::helpers::{serve, serve_with_registrar, serve_extra_cors, request, assert_security_headers}; +use tests::helpers::{serve, serve_with_registrar, request, assert_security_headers}; #[test] fn should_return_error() { @@ -195,26 +195,3 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() { response.assert_status("HTTP/1.1 200 OK"); response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); } - -#[test] -fn should_return_extra_cors_headers() { - // given - let server = serve_extra_cors(Some(vec!["all".to_owned()])); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://somedomain.io\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://somedomain.io"); -} - diff --git a/dapps/src/tests/authorization.rs b/dapps/src/tests/authorization.rs deleted file mode 100644 index 346f8f2fb..000000000 --- a/dapps/src/tests/authorization.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use tests::helpers::{serve_with_auth, request, assert_security_headers_for_embed}; - -#[test] -fn should_require_authorization() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert_eq!(response.headers.get(0).unwrap(), "WWW-Authenticate: Basic realm=\"Parity\""); -} - -#[test] -fn should_reject_on_invalid_auth() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert!(response.body.contains("Unauthorized"), response.body); - assert_eq!(response.headers_raw.contains("WWW-Authenticate"), false); -} - -#[test] -fn should_allow_on_valid_auth() { - // given - let server = serve_with_auth("Aladdin", "OpenSesame"); - - // when - let response = request(server, - "\ - GET /ui/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_security_headers_for_embed(&response.headers); -} diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index 036933995..e6c032549 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -16,18 +16,20 @@ use std::env; use std::str; -use std::ops::Deref; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; use std::sync::Arc; use env_logger::LogBuilder; -use ethcore_rpc::Metadata; -use jsonrpc_core::MetaIoHandler; +use jsonrpc_core::IoHandler; +use jsonrpc_http_server::{self as http, Host, 
DomainsValidation}; -use ServerBuilder; -use Server; -use fetch::Fetch; use devtools::http_client; +use hash_fetch::urlhint::ContractClient; +use fetch::{Fetch, Client as FetchClient}; use parity_reactor::{EventLoop, Remote}; +use {Middleware, SyncStatus, WebProxyTokens}; + mod registrar; mod fetch; @@ -50,7 +52,7 @@ pub struct ServerLoop { pub event_loop: EventLoop, } -impl Deref for ServerLoop { +impl ::std::ops::Deref for ServerLoop { type Target = Server; fn deref(&self) -> &Self::Target { @@ -58,7 +60,7 @@ impl Deref for ServerLoop { } } -pub fn init_server(process: F, io: MetaIoHandler, remote: Remote) -> (ServerLoop, Arc) where +pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (ServerLoop, Arc) where F: FnOnce(ServerBuilder) -> ServerBuilder, B: Fetch, { @@ -74,33 +76,15 @@ pub fn init_server(process: F, io: MetaIoHandler, remote: Remote &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); + .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); ( ServerLoop { server: server, event_loop: event_loop }, registrar, ) } -pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { - init_logger(); - let registrar = Arc::new(FakeRegistrar::new()); - let mut dapps_path = env::temp_dir(); - dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - - let event_loop = EventLoop::spawn(); - let io = MetaIoHandler::default(); - let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote()) - .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .allowed_hosts(None.into()) - .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap(); - ServerLoop { - server: server, - event_loop: event_loop, - } -} - -pub fn serve_with_rpc(io: MetaIoHandler) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), io, 
Remote::new_sync()).0 +pub fn serve_with_rpc(io: IoHandler) -> ServerLoop { + init_server(|builder| builder, io, Remote::new_sync()).0 } pub fn serve_hosts(hosts: Option>) -> ServerLoop { @@ -108,20 +92,13 @@ pub fn serve_hosts(hosts: Option>) -> ServerLoop { init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } -pub fn serve_extra_cors(extra_cors: Option>) -> ServerLoop { - let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect()); - init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0 -} - pub fn serve_with_registrar() -> (ServerLoop, Arc) { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) + init_server(|builder| builder, Default::default(), Remote::new_sync()) } pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { init_server(|builder| { - builder - .sync_status(Arc::new(|| true)) - .allowed_hosts(None.into()) + builder.sync_status(Arc::new(|| true)) }, Default::default(), Remote::new_sync()) } @@ -133,7 +110,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { - builder.allowed_hosts(None.into()).fetch(f.clone()) + builder.fetch(f.clone()) }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); (server, fetch, reg) @@ -144,7 +121,6 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { let f = fetch.clone(); let (server, _) = init_server(move |builder| { builder - .allowed_hosts(None.into()) .fetch(f.clone()) .web_proxy_tokens(Arc::new(move |token| &token == web_token)) }, Default::default(), Remote::new_sync()); @@ -153,7 +129,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { } pub fn serve() -> 
ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 + init_server(|builder| builder, Default::default(), Remote::new_sync()).0 } pub fn request(server: ServerLoop, request: &str) -> http_client::Response { @@ -166,3 +142,157 @@ pub fn assert_security_headers(headers: &[String]) { pub fn assert_security_headers_for_embed(headers: &[String]) { http_client::assert_security_headers_present(headers, Some(SIGNER_PORT)) } + + +/// Webapps HTTP+RPC server build. +pub struct ServerBuilder { + dapps_path: PathBuf, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + signer_address: Option<(String, u16)>, + allowed_hosts: DomainsValidation, + remote: Remote, + fetch: Option, +} + +impl ServerBuilder { + /// Construct new dapps server + pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { + ServerBuilder { + dapps_path: dapps_path.as_ref().to_owned(), + registrar: registrar, + sync_status: Arc::new(|| false), + web_proxy_tokens: Arc::new(|_| false), + signer_address: None, + allowed_hosts: DomainsValidation::Disabled, + remote: remote, + fetch: None, + } + } +} + +impl ServerBuilder { + /// Set a fetch client to use. + pub fn fetch(self, fetch: X) -> ServerBuilder { + ServerBuilder { + dapps_path: self.dapps_path, + registrar: self.registrar, + sync_status: self.sync_status, + web_proxy_tokens: self.web_proxy_tokens, + signer_address: self.signer_address, + allowed_hosts: self.allowed_hosts, + remote: self.remote, + fetch: Some(fetch), + } + } + + /// Change default sync status. + pub fn sync_status(mut self, status: Arc) -> Self { + self.sync_status = status; + self + } + + /// Change default web proxy tokens validator. + pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { + self.web_proxy_tokens = tokens; + self + } + + /// Change default signer port. 
+ pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { + self.signer_address = signer_address; + self + } + + /// Change allowed hosts. + /// `None` - All hosts are allowed + /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) + pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { + self.allowed_hosts = allowed_hosts; + self + } + + /// Asynchronously start server with no authentication, + /// returns result with `Server` handle on success or an error. + pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result { + let fetch = self.fetch_client(); + Server::start_http( + addr, + io, + self.allowed_hosts, + self.signer_address, + self.dapps_path, + vec![], + self.registrar, + self.sync_status, + self.web_proxy_tokens, + self.remote, + fetch, + ) + } + + fn fetch_client(&self) -> T { + match self.fetch.clone() { + Some(fetch) => fetch, + None => T::new().unwrap(), + } + } +} + + +/// Webapps HTTP server. +pub struct Server { + server: Option, +} + +impl Server { + fn start_http( + addr: &SocketAddr, + io: IoHandler, + allowed_hosts: DomainsValidation, + signer_address: Option<(String, u16)>, + dapps_path: PathBuf, + extra_dapps: Vec, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + remote: Remote, + fetch: F, + ) -> Result { + let middleware = Middleware::new( + remote, + signer_address, + dapps_path, + extra_dapps, + registrar, + sync_status, + web_proxy_tokens, + fetch, + ); + http::ServerBuilder::new(io) + .request_middleware(middleware) + .allowed_hosts(allowed_hosts) + .cors(http::DomainsValidation::Disabled) + .start_http(addr) + .map(|server| Server { + server: Some(server), + }) + } + + /// Returns address that this server is bound to. 
+ pub fn addr(&self) -> &SocketAddr { + self.server.as_ref() + .expect("server is always Some at the start; it's consumed only when object is dropped; qed") + .addrs() + .first() + .expect("You cannot start the server without binding to at least one address; qed") + } +} + +impl Drop for Server { + fn drop(&mut self) { + self.server.take().unwrap().close() + } +} + diff --git a/dapps/src/tests/mod.rs b/dapps/src/tests/mod.rs index ced211d53..089318483 100644 --- a/dapps/src/tests/mod.rs +++ b/dapps/src/tests/mod.rs @@ -19,7 +19,6 @@ mod helpers; mod api; -mod authorization; mod fetch; mod redirection; mod rpc; diff --git a/dapps/src/tests/rpc.rs b/dapps/src/tests/rpc.rs index 2cc4ccb24..0cfc2c5a8 100644 --- a/dapps/src/tests/rpc.rs +++ b/dapps/src/tests/rpc.rs @@ -14,16 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use futures::{future, Future}; -use ethcore_rpc::{Metadata, Origin}; -use jsonrpc_core::{MetaIoHandler, Value}; +use jsonrpc_core::{IoHandler, Value}; use tests::helpers::{serve_with_rpc, request}; #[test] fn should_serve_rpc() { // given - let mut io = MetaIoHandler::default(); + let mut io = IoHandler::default(); io.add_method("rpc_test", |_| { Ok(Value::String("Hello World!".into())) }); @@ -49,70 +47,3 @@ fn should_serve_rpc() { response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); } - -#[test] -fn should_extract_metadata() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("".into())); - assert_eq!(meta.dapp_id(), "".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ 
HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - X-Parity-Origin: https://this.should.be.ignored\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} - -#[test] -fn should_extract_metadata_from_custom_header() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into())); - assert_eq!(meta.dapp_id(), "https://parity.io/".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Origin: null\r\n\ - X-Parity-Origin: https://parity.io/\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} diff --git a/dapps/src/tests/validation.rs b/dapps/src/tests/validation.rs index afeb7b5ef..fb68cf5ed 100644 --- a/dapps/src/tests/validation.rs +++ b/dapps/src/tests/validation.rs @@ -34,7 +34,7 @@ fn should_reject_invalid_host() { // then assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); - assert!(response.body.contains("Current Host Is Disallowed"), response.body); + assert!(response.body.contains("Provided Host header is not whitelisted."), response.body); } #[test] @@ -97,31 +97,3 @@ fn should_allow_parity_utils_even_on_invalid_domain() { // then 
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); } - -#[test] -fn should_not_return_cors_headers_for_rpc() { - // given - let server = serve_hosts(Some(vec!["localhost:8080".into()])); - - // when - let response = request(server, - "\ - POST /rpc HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: null\r\n\ - Content-Type: application/json\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - !response.headers_raw.contains("Access-Control-Allow-Origin"), - "CORS headers were not expected: {:?}", - response.headers - ); -} - diff --git a/hash-fetch/src/urlhint.rs b/hash-fetch/src/urlhint.rs index 1588b5482..579c83845 100644 --- a/hash-fetch/src/urlhint.rs +++ b/hash-fetch/src/urlhint.rs @@ -92,12 +92,13 @@ pub enum URLHintResult { } /// URLHint Contract interface -pub trait URLHint { +pub trait URLHint: Send + Sync { /// Resolves given id to registrar entry. fn resolve(&self, id: Bytes) -> Option; } /// `URLHintContract` API +#[derive(Clone)] pub struct URLHintContract { urlhint: Contract, registrar: Contract, diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index df03b6cd7..eeac2431b 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -32,12 +32,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use http::hyper::server::{Listening, Handler, Request, Response}; +use http::hyper::server::{Handler, Request, Response}; use http::hyper::net::HttpStream; use http::hyper::header::{self, Vary, ContentLength, ContentType}; use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; +pub use http::hyper::server::Listening; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 65b1cfea4..7576c063f 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -164,6 +164,8 @@ usage! 
{ or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), flag_jsonrpc_hosts: String = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), + flag_jsonrpc_threads: Option = None, + or |c: &Config| otry!(c.rpc).threads.map(Some), // IPC flag_no_ipc: bool = false, @@ -176,21 +178,8 @@ usage! { // DAPPS flag_no_dapps: bool = false, or |c: &Config| otry!(c.dapps).disable.clone(), - flag_dapps_port: u16 = 8080u16, - or |c: &Config| otry!(c.dapps).port.clone(), - flag_dapps_interface: String = "local", - or |c: &Config| otry!(c.dapps).interface.clone(), - flag_dapps_hosts: String = "none", - or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| vec.join(",")), - flag_dapps_cors: Option = None, - or |c: &Config| otry!(c.dapps).cors.clone().map(Some), flag_dapps_path: String = "$BASE/dapps", or |c: &Config| otry!(c.dapps).path.clone(), - flag_dapps_user: Option = None, - or |c: &Config| otry!(c.dapps).user.clone().map(Some), - flag_dapps_pass: Option = None, - or |c: &Config| otry!(c.dapps).pass.clone().map(Some), - flag_dapps_apis_all: bool = false, or |_| None, // Secret Store flag_no_secretstore: bool = false, @@ -330,6 +319,22 @@ usage! 
{ or |c: &Config| otry!(c.misc).log_file.clone().map(Some), flag_no_color: bool = false, or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + + + // -- Legacy Options supported in configs + flag_dapps_port: Option = None, + or |c: &Config| otry!(c.dapps).port.clone().map(Some), + flag_dapps_interface: Option = None, + or |c: &Config| otry!(c.dapps).interface.clone().map(Some), + flag_dapps_hosts: Option = None, + or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| Some(vec.join(","))), + flag_dapps_cors: Option = None, + or |c: &Config| otry!(c.dapps).cors.clone().map(Some), + flag_dapps_user: Option = None, + or |c: &Config| otry!(c.dapps).user.clone().map(Some), + flag_dapps_pass: Option = None, + or |c: &Config| otry!(c.dapps).pass.clone().map(Some), + flag_dapps_apis_all: Option = None, or |_| None, } { // Values with optional default value. @@ -419,6 +424,7 @@ struct Rpc { cors: Option, apis: Option>, hosts: Option>, + threads: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -672,6 +678,7 @@ mod tests { flag_jsonrpc_cors: Some("null".into()), flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc".into(), flag_jsonrpc_hosts: "none".into(), + flag_jsonrpc_threads: None, // IPC flag_no_ipc: false, @@ -679,15 +686,8 @@ mod tests { flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc".into(), // DAPPS - flag_no_dapps: false, - flag_dapps_port: 8080u16, - flag_dapps_interface: "local".into(), - flag_dapps_hosts: "none".into(), - flag_dapps_cors: None, flag_dapps_path: "$HOME/.parity/dapps".into(), - flag_dapps_user: Some("test_user".into()), - flag_dapps_pass: Some("test_pass".into()), - flag_dapps_apis_all: false, + flag_no_dapps: false, flag_no_secretstore: false, flag_secretstore_port: 8082u16, @@ -792,6 +792,14 @@ mod tests { flag_extradata: None, flag_cache: None, flag_warp: Some(true), + // Legacy-Dapps + flag_dapps_port: Some(8080), + flag_dapps_interface: Some("local".into()), + flag_dapps_hosts: 
Some("none".into()), + flag_dapps_cors: None, + flag_dapps_user: Some("test_user".into()), + flag_dapps_pass: Some("test_pass".into()), + flag_dapps_apis_all: None, // -- Miscellaneous Options flag_version: false, @@ -873,6 +881,7 @@ mod tests { cors: None, apis: None, hosts: None, + threads: None, }), ipc: Some(Ipc { disable: None, diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 4c1abafbe..1ebeffef9 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -149,6 +149,8 @@ API and Console Options: is additional security against some attack vectors. Special options: "all", "none", (default: {flag_jsonrpc_hosts}). + --jsonrpc-threads THREADS Enables experimental faster implementation of JSON-RPC server. + Requires Dapps server to be disabled using --no-dapps. (default: {flag_jsonrpc_threads:?}) --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) --ipc-path PATH Specify custom path for JSON-RPC over IPC service @@ -157,29 +159,8 @@ API and Console Options: IPC (default: {flag_ipc_apis}). --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) - --dapps-port PORT Specify the port portion of the Dapps server - (default: {flag_dapps_port}). - --dapps-interface IP Specify the hostname portion of the Dapps - server, IP should be an interface's IP address, - or local (default: {flag_dapps_interface}). - --dapps-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_dapps_hosts}). - --dapps-cors URL Specify CORS headers for Dapps server APIs. - (default: {flag_dapps_cors:?}) - --dapps-user USERNAME Specify username for Dapps server. It will be - used in HTTP Basic Authentication Scheme. - If --dapps-pass is not specified you will be - asked for password on startup. 
(default: {flag_dapps_user:?}) - --dapps-pass PASSWORD Specify password for Dapps server. Use only in - conjunction with --dapps-user. (default: {flag_dapps_pass:?}) --dapps-path PATH Specify directory where dapps should be installed. (default: {flag_dapps_path}) - --dapps-apis-all Expose all possible RPC APIs on Dapps port. - WARNING: INSECURE. Used only for development. - (default: {flag_dapps_apis_all}) --ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api}) --ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen. (default: {flag_ipfs_api_port}) @@ -392,6 +373,13 @@ Legacy Options: --jsonrpc-off Equivalent to --no-jsonrpc. -w --webapp Does nothing; dapps server is on by default now. --dapps-off Equivalent to --no-dapps. + --dapps-user USERNAME Dapps server authentication has been removed. (default: {flag_dapps_user:?}) + --dapps-pass PASSWORD Dapps server authentication has been removed. (default: {flag_dapps_pass:?}) + --dapps-apis-all Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?}) + --dapps-cors URL Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?}) + --dapps-hosts HOSTS Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?}) + --dapps-interface IP Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?}) + --dapps-port PORT Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?}) --rpc Does nothing; JSON-RPC is on by default now. --warp Does nothing; Warp sync is on by default. (default: {flag_warp}) --rpcaddr IP Equivalent to --jsonrpc-interface IP. 
diff --git a/parity/configuration.rs b/parity/configuration.rs index 5dd11bd90..1eb4c1848 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -132,12 +132,17 @@ impl Configuration { let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; let ui_address = self.ui_port().map(|port| (self.ui_interface(), port)); - let dapps_conf = self.dapps_config(); + let mut dapps_conf = self.dapps_config(); let ipfs_conf = self.ipfs_config(); let signer_conf = self.signer_config(); let secretstore_conf = self.secretstore_config(); let format = self.format()?; + if self.args.flag_jsonrpc_threads.is_some() && dapps_conf.enabled { + dapps_conf.enabled = false; + writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.") + } + let cmd = if self.args.flag_version { Cmd::Version } else if self.args.cmd_signer { @@ -554,19 +559,12 @@ impl Configuration { fn dapps_config(&self) -> DappsConfiguration { DappsConfiguration { enabled: self.dapps_enabled(), - interface: self.dapps_interface(), - port: self.args.flag_dapps_port, - hosts: self.dapps_hosts(), - cors: self.dapps_cors(), - user: self.args.flag_dapps_user.clone(), - pass: self.args.flag_dapps_pass.clone(), dapps_path: PathBuf::from(self.directories().dapps), extra_dapps: if self.args.cmd_dapp { self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() } else { vec![] }, - all_apis: self.args.flag_dapps_apis_all, } } @@ -746,14 +744,10 @@ impl Configuration { Self::cors(self.args.flag_ipfs_api_cors.as_ref()) } - fn dapps_cors(&self) -> Option> { - Self::cors(self.args.flag_dapps_cors.as_ref()) - } - fn hosts(hosts: &str) -> Option> { match hosts { "none" => return Some(Vec::new()), - "all" => return None, + "*" | "all" | "any" => return None, _ => {} } let hosts = 
hosts.split(',').map(Into::into).collect(); @@ -764,10 +758,6 @@ impl Configuration { Self::hosts(&self.args.flag_jsonrpc_hosts) } - fn dapps_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_dapps_hosts) - } - fn ipfs_hosts(&self) -> Option> { Self::hosts(&self.args.flag_ipfs_api_hosts) } @@ -793,12 +783,17 @@ impl Configuration { fn http_config(&self) -> Result { let conf = HttpConfiguration { - enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + enabled: self.rpc_enabled(), interface: self.rpc_interface(), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), apis: self.rpc_apis().parse()?, hosts: self.rpc_hosts(), cors: self.rpc_cors(), + threads: match self.args.flag_jsonrpc_threads { + Some(threads) if threads > 0 => Some(threads), + None => None, + _ => return Err("--jsonrpc-threads number needs to be positive.".into()), + } }; Ok(conf) @@ -809,7 +804,7 @@ impl Configuration { name: self.args.flag_identity.clone(), chain: self.chain(), network_port: self.args.flag_port, - rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + rpc_enabled: self.rpc_enabled(), rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), } @@ -916,13 +911,6 @@ impl Configuration { Self::interface(&self.network_settings().rpc_interface) } - fn dapps_interface(&self) -> String { - match self.args.flag_dapps_interface.as_str() { - "local" => "127.0.0.1", - x => x, - }.into() - } - fn ipfs_interface(&self) -> String { Self::interface(&self.args.flag_ipfs_api_interface) } @@ -938,8 +926,12 @@ impl Configuration { Self::interface(&self.args.flag_stratum_interface) } + fn rpc_enabled(&self) -> bool { + !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc + } + fn dapps_enabled(&self) -> bool { - !self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") + !self.args.flag_dapps_off && 
!self.args.flag_no_dapps && self.rpc_enabled() && cfg!(feature = "dapps") } fn secretstore_enabled(&self) -> bool { @@ -1317,23 +1309,6 @@ mod tests { assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); } - #[test] - fn should_parse_dapps_hosts() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--dapps-hosts", "none"]); - let conf2 = parse(&["parity", "--dapps-hosts", "all"]); - let conf3 = parse(&["parity", "--dapps-hosts", "ethcore.io,something.io"]); - - // then - assert_eq!(conf0.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf1.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf2.dapps_hosts(), None); - assert_eq!(conf3.dapps_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); - } - #[test] fn should_parse_ipfs_hosts() { // given diff --git a/parity/dapps.rs b/parity/dapps.rs index bbd5f4960..e0e97c08f 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -19,25 +19,17 @@ use std::sync::Arc; use dir::default_data_path; use ethcore::client::Client; -use ethcore_rpc::informant::RpcStats; use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; use helpers::replace_home; -use rpc_apis::{self, SignerService}; +use rpc_apis::SignerService; use parity_reactor; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, - pub interface: String, - pub port: u16, - pub hosts: Option>, - pub cors: Option>, - pub user: Option, - pub pass: Option, pub dapps_path: PathBuf, pub extra_dapps: Vec, - pub all_apis: bool, } impl Default for Configuration { @@ -45,80 +37,56 @@ impl Default for Configuration { let data_dir = default_data_path(); Configuration { enabled: true, - interface: "127.0.0.1".into(), - port: 8080, - hosts: Some(Vec::new()), - cors: None, - user: None, - pass: None, dapps_path: replace_home(&data_dir, "$BASE/dapps").into(), extra_dapps: vec![], - all_apis: false, } } } pub struct Dependencies { - pub apis: Arc, pub client: Arc, 
pub sync: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, - pub stats: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { if !configuration.enabled { return Ok(None); } - let url = format!("{}:{}", configuration.interface, configuration.port); - let addr = url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))?; - - let auth = configuration.user.as_ref().map(|username| { - let password = configuration.pass.as_ref().map_or_else(|| { - use rpassword::read_password; - println!("Type password for WebApps server (user: {}): ", username); - let pass = read_password().unwrap(); - println!("OK, got it. Starting server..."); - pass - }, |pass| pass.to_owned()); - (username.to_owned(), password) - }); - - Ok(Some(setup_dapps_server( + dapps_middleware( deps, configuration.dapps_path, configuration.extra_dapps, - &addr, - configuration.hosts, - configuration.cors, - auth, - configuration.all_apis, - )?)) + ).map(Some) } -pub use self::server::WebappServer; -pub use self::server::setup_dapps_server; +pub use self::server::Middleware; +pub use self::server::dapps_middleware; #[cfg(not(feature = "dapps"))] mod server { use super::Dependencies; - use std::net::SocketAddr; use std::path::PathBuf; + use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction}; - pub struct WebappServer; - pub fn setup_dapps_server( + pub struct Middleware; + + impl RequestMiddleware for Middleware { + fn on_request( + &self, req: &hyper::server::Request, control: &hyper::Control + ) -> RequestMiddlewareAction { + unreachable!() + } + } + + pub fn dapps_middleware( _deps: Dependencies, _dapps_path: PathBuf, _extra_dapps: Vec, - _url: &SocketAddr, - _allowed_hosts: Option>, - _cors: Option>, - _auth: Option<(String, String)>, - _all_apis: bool, - ) -> Result { + ) -> Result { Err("Your Parity version has 
been compiled without WebApps support.".into()) } } @@ -128,78 +96,41 @@ mod server { use super::Dependencies; use std::path::PathBuf; use std::sync::Arc; - use std::net::SocketAddr; - use std::io; use util::{Bytes, Address, U256}; - use ansi_term::Colour; use ethcore::transaction::{Transaction, Action}; use ethcore::client::{Client, BlockChainClient, BlockId}; - use ethcore_dapps::{AccessControlAllowOrigin, Host}; use ethcore_rpc::is_major_importing; + use hash_fetch::fetch::Client as FetchClient; use hash_fetch::urlhint::ContractClient; + use parity_dapps; use parity_reactor; - use rpc_apis; - pub use ethcore_dapps::Server as WebappServer; + pub type Middleware = parity_dapps::Middleware; - pub fn setup_dapps_server( + pub fn dapps_middleware( deps: Dependencies, dapps_path: PathBuf, extra_dapps: Vec, - url: &SocketAddr, - allowed_hosts: Option>, - cors: Option>, - auth: Option<(String, String)>, - all_apis: bool, - ) -> Result { - use ethcore_dapps as dapps; - - let server = dapps::ServerBuilder::new( - &dapps_path, - Arc::new(Registrar { client: deps.client.clone() }), - parity_reactor::Remote::new(deps.remote.clone()), - ); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - - let sync = deps.sync.clone(); - let client = deps.client.clone(); + ) -> Result { + let sync = deps.sync; let signer = deps.signer.clone(); - let server = server - .fetch(deps.fetch.clone()) - .sync_status(Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()))) - .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) - .extra_dapps(&extra_dapps) - .signer_address(deps.signer.address()) - .allowed_hosts(allowed_hosts.into()) - .extra_cors_headers(cors.into()); + let client = deps.client; + let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); + let 
registrar = Arc::new(Registrar { client: client.clone() }); + let sync_status = Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())); + let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token)); - let api_set = if all_apis { - warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Dapps with all APIs exposed.")); - info!("If you do not intend this, exit now."); - rpc_apis::ApiSet::SafeContext - } else { - rpc_apis::ApiSet::UnsafeContext - }; - let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); - let start_result = match auth { - None => { - server.start_unsecured_http(url, apis, deps.remote) - }, - Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password, apis, deps.remote) - }, - }; - - match start_result { - Err(dapps::ServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("WebApps address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --dapps-port and --dapps-interface options.", url)), - _ => Err(format!("WebApps io error: {}", err)), - }, - Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => Ok(server), - } + Ok(parity_dapps::Middleware::new( + parity_remote, + deps.signer.address(), + dapps_path, + extra_dapps, + registrar, + sync_status, + web_proxy_tokens, + deps.fetch.clone(), + )) } struct Registrar { diff --git a/parity/deprecated.rs b/parity/deprecated.rs index 97c6ffe4a..820181efa 100644 --- a/parity/deprecated.rs +++ b/parity/deprecated.rs @@ -21,94 +21,89 @@ use cli::Args; pub enum Deprecated { DoesNothing(&'static str), Replaced(&'static str, &'static str), + Removed(&'static str), } impl fmt::Display for Deprecated { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. 
It's on by default", s), - Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new), + Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default.", s), + Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead.", old, new), + Deprecated::Removed(s) => write!(f, "Option '{}' has been removed and is no longer supported.", s) } } } -impl Deprecated { - fn jsonrpc() -> Self { - Deprecated::DoesNothing("--jsonrpc") - } - - fn rpc() -> Self { - Deprecated::DoesNothing("--rpc") - } - - fn jsonrpc_off() -> Self { - Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc") - } - - fn webapp() -> Self { - Deprecated::DoesNothing("--webapp") - } - - fn dapps_off() -> Self { - Deprecated::Replaced("--dapps-off", "--no-dapps") - } - - fn ipcdisable() -> Self { - Deprecated::Replaced("--ipcdisable", "--no-ipc") - } - - fn ipc_off() -> Self { - Deprecated::Replaced("--ipc-off", "--no-ipc") - } - - fn etherbase() -> Self { - Deprecated::Replaced("--etherbase", "--author") - } - - fn extradata() -> Self { - Deprecated::Replaced("--extradata", "--extra-data") - } -} - pub fn find_deprecated(args: &Args) -> Vec { let mut result = vec![]; if args.flag_jsonrpc { - result.push(Deprecated::jsonrpc()); + result.push(Deprecated::DoesNothing("--jsonrpc")); } if args.flag_rpc { - result.push(Deprecated::rpc()); + result.push(Deprecated::DoesNothing("--rpc")); } if args.flag_jsonrpc_off { - result.push(Deprecated::jsonrpc_off()); + result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")); } if args.flag_webapp { - result.push(Deprecated::webapp()) + result.push(Deprecated::DoesNothing("--webapp")); } if args.flag_dapps_off { - result.push(Deprecated::dapps_off()); + result.push(Deprecated::Replaced("--dapps-off", "--no-dapps")); } if args.flag_ipcdisable { - result.push(Deprecated::ipcdisable()); + result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc")); } if 
args.flag_ipc_off { - result.push(Deprecated::ipc_off()); + result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); } if args.flag_etherbase.is_some() { - result.push(Deprecated::etherbase()); + result.push(Deprecated::Replaced("--etherbase", "--author")); } if args.flag_extradata.is_some() { - result.push(Deprecated::extradata()); + result.push(Deprecated::Replaced("--extradata", "--extra-data")); } + // Removed in 1.7 + if args.flag_dapps_port.is_some() { + result.push(Deprecated::Replaced("--dapps-port", "--jsonrpc-port")); + } + + if args.flag_dapps_interface.is_some() { + result.push(Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface")); + } + + if args.flag_dapps_hosts.is_some() { + result.push(Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts")); + } + + if args.flag_dapps_cors.is_some() { + result.push(Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors")); + } + + if args.flag_dapps_user.is_some() { + result.push(Deprecated::Removed("--dapps-user")); + } + + if args.flag_dapps_pass.is_some() { + result.push(Deprecated::Removed("--dapps-pass")); + } + + if args.flag_dapps_apis_all.is_some() { + result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); + } + + // Removed in 1.8 + result } @@ -131,17 +126,31 @@ mod tests { args.flag_ipc_off = true; args.flag_etherbase = Some(Default::default()); args.flag_extradata = Some(Default::default()); + args.flag_dapps_port = Some(Default::default()); + args.flag_dapps_interface = Some(Default::default()); + args.flag_dapps_hosts = Some(Default::default()); + args.flag_dapps_cors = Some(Default::default()); + args.flag_dapps_user = Some(Default::default()); + args.flag_dapps_pass = Some(Default::default()); + args.flag_dapps_apis_all = Some(Default::default()); args }), vec![ - Deprecated::jsonrpc(), - Deprecated::rpc(), - Deprecated::jsonrpc_off(), - Deprecated::webapp(), - Deprecated::dapps_off(), - Deprecated::ipcdisable(), - Deprecated::ipc_off(), - Deprecated::etherbase(), - 
Deprecated::extradata(), + Deprecated::DoesNothing("--jsonrpc"), + Deprecated::DoesNothing("--rpc"), + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"), + Deprecated::DoesNothing("--webapp"), + Deprecated::Replaced("--dapps-off", "--no-dapps"), + Deprecated::Replaced("--ipcdisable", "--no-ipc"), + Deprecated::Replaced("--ipc-off", "--no-ipc"), + Deprecated::Replaced("--etherbase", "--author"), + Deprecated::Replaced("--extradata", "--extra-data"), + Deprecated::Replaced("--dapps-port", "--jsonrpc-port"), + Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface"), + Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts"), + Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors"), + Deprecated::Removed("--dapps-user"), + Deprecated::Removed("--dapps-pass"), + Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"), ]); } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index 760868f91..45c3f7062 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -15,10 +15,9 @@ // along with Parity. If not, see . 
use std::sync::Arc; -use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; +use parity_ipfs_api::{self, AccessControlAllowOrigin, Host, Listening}; use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; -use hyper::server::Listening; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { diff --git a/parity/main.rs b/parity/main.rs index 2044b3ee0..4b6dc6dab 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -28,7 +28,6 @@ extern crate ctrlc; extern crate docopt; extern crate env_logger; extern crate fdlimit; -extern crate hyper; extern crate isatty; extern crate jsonrpc_core; extern crate num_cpus; @@ -73,7 +72,11 @@ extern crate ethcore_stratum; extern crate ethcore_secretstore; #[cfg(feature = "dapps")] -extern crate ethcore_dapps; +extern crate parity_dapps; + +#[cfg(test)] +#[macro_use] +extern crate pretty_assertions; #[cfg(windows)] extern crate ws2_32; #[cfg(windows)] extern crate winapi; diff --git a/parity/rpc.rs b/parity/rpc.rs index a435f24db..254bb782e 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,24 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::fmt; +use std::{io, fmt}; use std::sync::Arc; -use std::net::SocketAddr; -use std::io; +use dapps; use dir::default_data_path; -use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; +use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use helpers::parity_ipc_path; -use hyper; use jsonrpc_core::MetaIoHandler; -use rpc_apis; -use rpc_apis::ApiSet; use parity_reactor::TokioRemote; +use rpc_apis::{self, ApiSet}; -pub use ethcore_rpc::{IpcServer, HttpServer}; +pub use ethcore_rpc::{IpcServer, HttpServer, RequestMiddleware}; -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, pub interface: String, @@ -39,6 +36,7 @@ pub struct HttpConfiguration { pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, + pub threads: Option, } impl Default for HttpConfiguration { @@ -50,6 +48,7 @@ impl Default for HttpConfiguration { apis: ApiSet::UnsafeContext, cors: None, hosts: Some(Vec::new()), + threads: None, } } } @@ -89,13 +88,17 @@ pub struct Dependencies { } pub struct RpcExtractor; -impl rpc::HttpMetaExtractor for RpcExtractor { - fn read_metadata(&self, req: &hyper::server::Request) -> Metadata { - let origin = req.headers().get::() - .map(|origin| format!("{}://{}", origin.scheme, origin.host)) - .unwrap_or_else(|| "unknown".into()); +impl rpc::HttpMetaExtractor for RpcExtractor { + type Metadata = Metadata; + + fn read_metadata(&self, origin: String, dapps_origin: Option) -> Metadata { let mut metadata = Metadata::default(); - metadata.origin = Origin::Rpc(origin); + + metadata.origin = match (origin.as_str(), dapps_origin) { + ("null", Some(dapp)) => Origin::Dapps(dapp.into()), + _ => Origin::Rpc(origin), + }; + metadata } } @@ -109,52 +112,92 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> 
Result, String> { +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler { + rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) +} + +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies, middleware: Option) -> Result, String> { if !conf.enabled { return Ok(None); } let url = format!("{}:{}", conf.interface, conf.port); let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?; - Ok(Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)?)) -} + let handler = setup_apis(conf.apis, deps); + let remote = deps.remote.clone(); -fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler { - rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) -} + let cors_domains: Option> = conf.cors.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); + let allowed_hosts: Option> = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + + let start_result = rpc::start_http( + &addr, + cors_domains.into(), + allowed_hosts.into(), + handler, + remote, + RpcExtractor, + match (conf.threads, middleware) { + (Some(threads), None) => rpc::HttpSettings::Threads(threads), + (None, middleware) => rpc::HttpSettings::Dapps(middleware), + (Some(_), Some(_)) => { + return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into()) + }, + } + ); -pub fn setup_http_rpc_server( - dependencies: &Dependencies, - url: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - apis: ApiSet -) -> Result { - let handler = setup_apis(apis, dependencies); - let remote = dependencies.remote.clone(); - let cors_domains: Option> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, 
remote, RpcExtractor); match start_result { - Err(HttpServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)), + Ok(server) => Ok(Some(server)), + Err(HttpServerError::Io(err)) => match err.kind() { + io::ErrorKind::AddrInUse => Err( + format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url) + ), _ => Err(format!("RPC io error: {}", err)), }, Err(e) => Err(format!("RPC error: {:?}", e)), - Ok(server) => Ok(server), } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { - if !conf.enabled { return Ok(None); } - Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) -} - -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { - let handler = setup_apis(apis, dependencies); +pub fn new_ipc(conf: IpcConfiguration, dependencies: &Dependencies) -> Result, String> { + if !conf.enabled { + return Ok(None); + } + let handler = setup_apis(conf.apis, dependencies); let remote = dependencies.remote.clone(); - match rpc::start_ipc(addr, handler, remote, RpcExtractor) { + match rpc::start_ipc(&conf.socket_addr, handler, remote, RpcExtractor) { + Ok(server) => Ok(Some(server)), Err(io_error) => Err(format!("RPC io error: {}", io_error)), - Ok(server) => Ok(server) + } +} + +#[cfg(test)] +mod tests { + use super::RpcExtractor; + use ethcore_rpc::{HttpMetaExtractor, Origin}; + + #[test] + fn should_extract_rpc_origin() { + // given + let extractor = RpcExtractor; + + // when + let meta = extractor.read_metadata("http://parity.io".into(), None); + let meta1 = extractor.read_metadata("http://parity.io".into(), Some("ignored".into())); + + 
// then + assert_eq!(meta.origin, Origin::Rpc("http://parity.io".into())); + assert_eq!(meta1.origin, Origin::Rpc("http://parity.io".into())); + } + + #[test] + fn should_dapps_origin() { + // given + let extractor = RpcExtractor; + let dapp = "https://wallet.ethereum.org".to_owned(); + + // when + let meta = extractor.read_metadata("null".into(), Some(dapp.clone())); + + // then + assert_eq!(meta.origin, Origin::Dapps(dapp.into())); } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index e168f029c..dbeeea962 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -81,7 +81,7 @@ impl FromStr for Api { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ApiSet { SafeContext, UnsafeContext, diff --git a/parity/run.rs b/parity/run.rs index c438c25a5..a85bcc39b 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -36,7 +36,6 @@ use updater::{UpdatePolicy, Updater}; use parity_reactor::EventLoop; use hash_fetch::fetch::{Fetch, Client as FetchClient}; -use rpc::{HttpConfiguration, IpcConfiguration}; use params::{ SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool @@ -76,8 +75,8 @@ pub struct RunCmd { pub daemon: Option, pub logger_config: LogConfig, pub miner_options: MinerOptions, - pub http_conf: HttpConfiguration, - pub ipc_conf: IpcConfiguration, + pub http_conf: rpc::HttpConfiguration, + pub ipc_conf: rpc::IpcConfiguration, pub net_conf: NetworkConfiguration, pub network_id: Option, pub warp_sync: bool, @@ -110,11 +109,7 @@ pub struct RunCmd { pub verifier_settings: VerifierSettings, } -pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { - if !dapps_conf.enabled { - return Err("Cannot use UI command with Dapps turned off.".into()) - } - +pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> { if !signer_conf.enabled { return Err("Cannot use UI command with UI turned off.".into()) } @@ 
-127,12 +122,12 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur Ok(()) } -pub fn open_dapp(dapps_conf: &dapps::Configuration, dapp: &str) -> Result<(), String> { +pub fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> { if !dapps_conf.enabled { return Err("Cannot use DAPP command with Dapps turned off.".into()) } - let url = format!("http://{}:{}/{}/", dapps_conf.interface, dapps_conf.port, dapp); + let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp); url::open(&url); Ok(()) } @@ -156,9 +151,9 @@ impl ::local_store::NodeInfo for FullNodeInfo { pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running - let addr = format!("{}:{}", cmd.dapps_conf.interface, cmd.dapps_conf.port); + let addr = format!("{}:{}", cmd.signer_conf.interface, cmd.signer_conf.port); if !TcpListener::bind(&addr as &str).is_ok() { - return open_ui(&cmd.dapps_conf, &cmd.signer_conf).map(|_| (false, None)); + return open_ui(&cmd.signer_conf).map(|_| (false, None)); } } @@ -429,11 +424,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R updater: updater.clone(), geth_compatibility: cmd.geth_compatibility, dapps_interface: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.interface.clone()), + true => Some(cmd.http_conf.interface.clone()), false => None, }, dapps_port: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.port), + true => Some(cmd.http_conf.port), false => None, }, fetch: fetch.clone(), @@ -445,21 +440,19 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R stats: rpc_stats.clone(), }; - // start rpc servers - let http_server = rpc::new_http(cmd.http_conf, &dependencies)?; - let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - - // the dapps server + // the dapps middleware let 
dapps_deps = dapps::Dependencies { - apis: deps_for_rpc_apis.clone(), client: client.clone(), sync: sync_provider.clone(), remote: event_loop.raw_remote(), fetch: fetch.clone(), signer: deps_for_rpc_apis.signer_service.clone(), - stats: rpc_stats.clone(), }; - let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + + // start rpc servers + let http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; // the signer server let signer_deps = signer::Dependencies { @@ -524,18 +517,18 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // start ui if cmd.ui { - open_ui(&cmd.dapps_conf, &cmd.signer_conf)?; + open_ui(&cmd.signer_conf)?; } if let Some(dapp) = cmd.dapp { - open_dapp(&cmd.dapps_conf, &dapp)?; + open_dapp(&cmd.dapps_conf, &cmd.http_conf, &dapp)?; } // Handle exit let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart); // drop this stuff as soon as exit detected. 
- drop((http_server, ipc_server, dapps_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); + drop((http_server, ipc_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); info!("Finishing work, please wait..."); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 960dc5102..cbfecf366 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -21,6 +21,7 @@ transient-hashmap = "0.4" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-minihttp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index abc51f2ed..247edd8f7 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -29,8 +29,9 @@ extern crate time; extern crate transient_hashmap; extern crate jsonrpc_core; -pub extern crate jsonrpc_http_server as http; -pub extern crate jsonrpc_ipc_server as ipc; +extern crate jsonrpc_http_server as http; +extern crate jsonrpc_minihttp_server as minihttp; +extern crate jsonrpc_ipc_server as ipc; extern crate ethash; extern crate ethcore; @@ -62,10 +63,15 @@ extern crate ethjson; #[cfg(test)] extern crate ethcore_devtools as devtools; +mod metadata; pub mod v1; pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; -pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; +pub use http::{ + hyper, + RequestMiddleware, RequestMiddlewareAction, + AccessControlAllowOrigin, Host, +}; pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; pub use 
v1::block_import::is_major_importing; @@ -73,26 +79,98 @@ pub use v1::block_import::is_major_importing; use std::net::SocketAddr; use http::tokio_core; +/// RPC HTTP Server instance +pub enum HttpServer { + /// Fast MiniHTTP variant + Mini(minihttp::Server), + /// Hyper variant + Hyper(http::Server), +} + +/// RPC HTTP Server error +#[derive(Debug)] +pub enum HttpServerError { + /// IO error + Io(::std::io::Error), + /// Other hyper error + Hyper(hyper::Error), +} + +impl From for HttpServerError { + fn from(e: http::Error) -> Self { + use self::HttpServerError::*; + match e { + http::Error::Io(io) => Io(io), + http::Error::Other(hyper) => Hyper(hyper), + } + } +} + +impl From for HttpServerError { + fn from(e: minihttp::Error) -> Self { + use self::HttpServerError::*; + match e { + minihttp::Error::Io(io) => Io(io), + } + } +} + +/// HTTP RPC server impl-independent metadata extractor +pub trait HttpMetaExtractor: Send + Sync + 'static { + /// Type of Metadata + type Metadata: jsonrpc_core::Metadata; + /// Extracts metadata from given params. + fn read_metadata(&self, origin: String, dapps_origin: Option) -> Self::Metadata; +} + +/// HTTP server implementation-specific settings. +pub enum HttpSettings { + /// Enable fast minihttp server with given number of threads. + Threads(usize), + /// Enable standard server with optional dapps middleware. + Dapps(Option), +} + /// Start http server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_http( +pub fn start_http( addr: &SocketAddr, cors_domains: http::DomainsValidation, allowed_hosts: http::DomainsValidation, handler: H, remote: tokio_core::reactor::Remote, extractor: T, + settings: HttpSettings, ) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, H: Into>, - T: HttpMetaExtractor, + T: HttpMetaExtractor, + R: RequestMiddleware, { - http::ServerBuilder::new(handler) - .event_loop_remote(remote) - .meta_extractor(extractor) - .cors(cors_domains.into()) - .allowed_hosts(allowed_hosts.into()) - .start_http(addr) + Ok(match settings { + HttpSettings::Dapps(middleware) => { + let mut builder = http::ServerBuilder::new(handler) + .event_loop_remote(remote) + .meta_extractor(metadata::HyperMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()); + + if let Some(dapps) = middleware { + builder = builder.request_middleware(dapps) + } + builder.start_http(addr) + .map(HttpServer::Hyper)? + }, + HttpSettings::Threads(threads) => { + minihttp::ServerBuilder::new(handler) + .threads(threads) + .meta_extractor(metadata::MiniMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()) + .start_http(addr) + .map(HttpServer::Mini)? + }, + }) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. diff --git a/rpc/src/metadata.rs b/rpc/src/metadata.rs new file mode 100644 index 000000000..af3a5d183 --- /dev/null +++ b/rpc/src/metadata.rs @@ -0,0 +1,74 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use jsonrpc_core; +use http; +use hyper; +use minihttp; +use HttpMetaExtractor; + +pub struct HyperMetaExtractor { + extractor: T, +} + +impl HyperMetaExtractor { + pub fn new(extractor: T) -> Self { + HyperMetaExtractor { + extractor: extractor, + } + } +} + +impl http::MetaExtractor for HyperMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &hyper::server::Request) -> M { + let origin = req.headers().get::() + .map(|origin| format!("{}://{}", origin.scheme, origin.host)) + .unwrap_or_else(|| "unknown".into()); + let dapps_origin = req.headers().get_raw("x-parity-origin") + .and_then(|raw| raw.one()) + .map(|raw| String::from_utf8_lossy(raw).into_owned()); + self.extractor.read_metadata(origin, dapps_origin) + } +} + +pub struct MiniMetaExtractor { + extractor: T, +} + +impl MiniMetaExtractor { + pub fn new(extractor: T) -> Self { + MiniMetaExtractor { + extractor: extractor, + } + } +} + +impl minihttp::MetaExtractor for MiniMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &minihttp::Req) -> M { + let origin = req.header("origin") + .unwrap_or_else(|| "unknown") + .to_owned(); + let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned()); + + self.extractor.read_metadata(origin, dapps_origin) + } +} diff --git a/scripts/targets.sh b/scripts/targets.sh index 505875336..040485d85 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -5,7 +5,7 @@ export TARGETS=" -p ethash \ -p ethcore \ -p ethcore-bigint\ - -p ethcore-dapps \ + -p parity-dapps \ -p ethcore-rpc \ -p ethcore-signer \ -p 
ethcore-util \ From c0c06fdc53a912cb39f970b9086283aeee89b27b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 3 Apr 2017 12:13:51 +0300 Subject: [PATCH 78/91] Secretstore over network (#4974) * ECDKG protocol prototype * added test for enc/dec math * get rid of decryption_session * added licenses * fix after merge * get rid of unused serde dependency * doc * decryption session [without commutative enc] * failed_dec_session * fixed tests * added commen * added more decryption session tests * helper to localize an issue * more computations to localize error * decryption_session::SessionParams * added tests for EC math to localize problem * secretstore network transport * encryption_session_works_over_network * network errors processing * connecting to KeyServer * licenses * get rid of debug println-s * fixed secretstore args * encryption results are stored in KS database * decryption protocol works over network * enc/dec Session traits * fixing warnings * fix after merge * finally fixed -of-N-scheme * temporary commented test * 1-of-N works in math * scheme 1-of-N works * remove unnecessary unsafety * fixed grumbles * fix grumbles * lost files --- Cargo.lock | 11 + Cargo.toml | 1 + ethcrypto/src/lib.rs | 6 + ethkey/src/keypair.rs | 1 + parity/main.rs | 1 + parity/secretstore.rs | 32 +- secret_store/Cargo.toml | 10 + secret_store/src/http_listener.rs | 113 ++- secret_store/src/key_server.rs | 220 +++-- .../src/key_server_cluster/cluster.rs | 926 +++++++++++++++++- .../key_server_cluster/decryption_session.rs | 387 +++++--- .../key_server_cluster/encryption_session.rs | 666 +++++++++---- .../src/key_server_cluster/io/deadline.rs | 85 ++ .../src/key_server_cluster/io/handshake.rs | 320 ++++++ .../src/key_server_cluster/io/message.rs | 247 +++++ secret_store/src/key_server_cluster/io/mod.rs | 34 + .../src/key_server_cluster/io/read_header.rs | 44 + .../src/key_server_cluster/io/read_message.rs | 86 ++ .../src/key_server_cluster/io/read_payload.rs | 64 ++ 
.../io/shared_tcp_stream.rs | 60 ++ .../key_server_cluster/io/write_message.rs | 70 ++ secret_store/src/key_server_cluster/math.rs | 34 +- .../src/key_server_cluster/message.rs | 270 ++++- secret_store/src/key_server_cluster/mod.rs | 101 +- .../net/accept_connection.rs | 63 ++ .../src/key_server_cluster/net/connect.rs | 90 ++ .../src/key_server_cluster/net/connection.rs | 32 + .../src/key_server_cluster/net/mod.rs | 23 + secret_store/src/key_storage.rs | 125 ++- secret_store/src/lib.rs | 23 +- secret_store/src/serialization.rs | 260 +++++ secret_store/src/traits.rs | 2 + secret_store/src/types/all.rs | 61 +- 33 files changed, 3894 insertions(+), 574 deletions(-) create mode 100644 secret_store/src/key_server_cluster/io/deadline.rs create mode 100644 secret_store/src/key_server_cluster/io/handshake.rs create mode 100644 secret_store/src/key_server_cluster/io/message.rs create mode 100644 secret_store/src/key_server_cluster/io/mod.rs create mode 100644 secret_store/src/key_server_cluster/io/read_header.rs create mode 100644 secret_store/src/key_server_cluster/io/read_message.rs create mode 100644 secret_store/src/key_server_cluster/io/read_payload.rs create mode 100644 secret_store/src/key_server_cluster/io/shared_tcp_stream.rs create mode 100644 secret_store/src/key_server_cluster/io/write_message.rs create mode 100644 secret_store/src/key_server_cluster/net/accept_connection.rs create mode 100644 secret_store/src/key_server_cluster/net/connect.rs create mode 100644 secret_store/src/key_server_cluster/net/connection.rs create mode 100644 secret_store/src/key_server_cluster/net/mod.rs create mode 100644 secret_store/src/serialization.rs diff --git a/Cargo.lock b/Cargo.lock index 83e643900..aecf8960f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,6 +23,7 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-stratum 1.7.0", "ethcore-util 1.7.0", + "ethkey 0.2.0", "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 
@@ -633,6 +634,7 @@ dependencies = [ name = "ethcore-secretstore" version = "1.0.0" dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -640,9 +642,18 @@ dependencies = [ "ethcore-util 1.7.0", "ethcrypto 0.1.0", "ethkey 0.2.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index b82490e88..1d0e27a71 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ ethcore-ipc-hypervisor = { path = "ipc/hypervisor" } ethcore-light = { path = "ethcore/light" } ethcore-logger = { path = "logger" } ethcore-stratum = { path = "stratum" } +ethkey = { path = "ethkey" } evmbin = { path = "evmbin" } rlp = { path = "util/rlp" } rpc-cli = { path = "rpc_cli" } diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index a4d426b54..9c1352087 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -78,6 +78,12 @@ impl fmt::Display for Error { } } +impl Into for Error { + fn into(self) 
-> String { + format!("{}", self) + } +} + impl From for Error { fn from(e: SecpError) -> Self { Error::Secp(e) diff --git a/ethkey/src/keypair.rs b/ethkey/src/keypair.rs index b25664cd7..f883c4738 100644 --- a/ethkey/src/keypair.rs +++ b/ethkey/src/keypair.rs @@ -27,6 +27,7 @@ pub fn public_to_address(public: &Public) -> Address { result } +#[derive(Clone)] /// secp256k1 key pair pub struct KeyPair { secret: Secret, diff --git a/parity/main.rs b/parity/main.rs index 4b6dc6dab..1063571a9 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -53,6 +53,7 @@ extern crate ethcore_logger; extern crate ethcore_rpc; extern crate ethcore_signer; extern crate ethcore_util as util; +extern crate ethkey; extern crate ethsync; extern crate parity_hash_fetch as hash_fetch; extern crate parity_ipfs_api; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 79a209504..13d6d28d2 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -53,6 +53,7 @@ mod server { #[cfg(feature="secretstore")] mod server { + use ethkey; use ethcore_secretstore; use super::{Configuration, Dependencies}; @@ -64,10 +65,35 @@ mod server { impl KeyServer { /// Create new key server pub fn new(conf: Configuration, _deps: Dependencies) -> Result { + let key_pairs = vec![ + ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("5ab6ed2a52c33142380032c39a03a86b12eacb3fa4b53bc16d84f51318156f8c".parse().unwrap()).unwrap(), + ]; let conf = ethcore_secretstore::ServiceConfiguration { - listener_addr: conf.interface, - listener_port: conf.port, - data_path: conf.data_path, + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port, + }, + data_path: conf.data_path.clone(), + // TODO: this is test configuration. 
how it will be configured in production? + cluster_config: ethcore_secretstore::ClusterConfiguration { + threads: 4, + self_private: (***key_pairs[(conf.port - 8082) as usize].secret()).into(), + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port + 10, + }, + nodes: key_pairs.iter().enumerate().map(|(i, kp)| (kp.public().clone(), + ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: 8082 + 10 + (i as u16), + })).collect(), + allow_connecting_to_higher_nodes: true, + encryption_config: ethcore_secretstore::EncryptionConfiguration { + key_check_timeout_ms: 1000, + }, + } }; let key_server = ethcore_secretstore::start(conf) diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index eff7c1ef0..fba76804b 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -10,9 +10,19 @@ build = "build.rs" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] +byteorder = "1.0" log = "0.3" parking_lot = "0.4" hyper = { version = "0.10", default-features = false } +serde = "0.9" +serde_json = "0.9" +serde_derive = "0.9" +futures = "0.1" +futures-cpupool = "0.1" +rustc-serialize = "0.3" +tokio-core = "0.1" +tokio-service = "0.1" +tokio-proto = "0.1" url = "1.0" ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index 92799d221..79fe71330 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::str::FromStr; use std::sync::Arc; use hyper::header; use hyper::uri::RequestUri; @@ -39,7 +38,9 @@ pub struct KeyServerHttpListener { enum Request { /// Invalid request Invalid, - /// Request encryption key of given document for given requestor + /// Generate encryption key. 
+ GenerateDocumentKey(DocumentAddress, RequestSignature, usize), + /// Request encryption key of given document for given requestor. GetDocumentKey(DocumentAddress, RequestSignature), } @@ -63,9 +64,9 @@ impl KeyServerHttpListener where T: KeyServer + 'static { handler: shared_handler.clone(), }; - let listener_addr: &str = &format!("{}:{}", config.listener_addr, config.listener_port); - let http_server = HttpServer::http(&listener_addr).unwrap(); - let http_server = http_server.handle(handler).unwrap(); + let listener_addr: &str = &format!("{}:{}", config.listener_address.address, config.listener_address.port); + let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer"); + let http_server = http_server.handle(handler).expect("cannot start HttpServer"); let listener = KeyServerHttpListener { _http_server: http_server, handler: shared_handler, @@ -75,6 +76,10 @@ impl KeyServerHttpListener where T: KeyServer + 'static { } impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + self.handler.key_server.generate_document_key(signature, document, threshold) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { self.handler.key_server.document_key(signature, document) } @@ -82,95 +87,103 @@ impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { fn handle(&self, req: HttpRequest, mut res: HttpResponse) { - if req.method != HttpMethod::Get { - warn!(target: "secretstore", "Ignoring {}-request {}", req.method, req.uri); - *res.status_mut() = HttpStatusCode::NotFound; - return; - } - if req.headers.has::() { warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri); *res.status_mut() = HttpStatusCode::NotFound; return; } - match req.uri { - 
RequestUri::AbsolutePath(ref path) => match parse_request(&path) { - Request::GetDocumentKey(document, signature) => { - let document_key = self.handler.key_server.document_key(&signature, &document) + let req_method = req.method.clone(); + let req_uri = req.uri.clone(); + match &req_uri { + &RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) { + Request::GenerateDocumentKey(document, signature, threshold) => { + return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold) .map_err(|err| { - warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req.uri, err); + warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err); err - }); - match document_key { - Ok(document_key) => { - let document_key = document_key.to_hex().into_bytes(); - res.headers_mut().set(header::ContentType::plaintext()); - if let Err(err) = res.send(&document_key) { - // nothing to do, but log error - warn!(target: "secretstore", "GetDocumentKey request {} response has failed with: {}", req.uri, err); - } - }, - Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest, - Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, - Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, - Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - } + })); + }, + Request::GetDocumentKey(document, signature) => { + return_document_key(req, res, self.handler.key_server.document_key(&signature, &document) + .map_err(|err| { + warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err); + err + })); }, Request::Invalid => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request 
{}", req_method, req_uri); *res.status_mut() = HttpStatusCode::BadRequest; }, }, _ => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); *res.status_mut() = HttpStatusCode::NotFound; }, }; } } -fn parse_request(uri_path: &str) -> Request { +fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result) { + match document_key { + Ok(document_key) => { + let document_key = document_key.to_hex().into_bytes(); + res.headers_mut().set(header::ContentType::plaintext()); + if let Err(err) = res.send(&document_key) { + // nothing to do, but to log an error + warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); + } + }, + Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest, + Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, + Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, + Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, + Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, + } +} + +fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() { Ok(path) => path, Err(_) => return Request::Invalid, }; let path: Vec = uri_path.trim_left_matches('/').split('/').map(Into::into).collect(); - if path.len() != 2 || path[0].is_empty() || path[1].is_empty() { + if path.len() < 2 || path[0].is_empty() || path[1].is_empty() { return Request::Invalid; } - let document = DocumentAddress::from_str(&path[0]); - let signature = RequestSignature::from_str(&path[1]); - match (document, signature) { - (Ok(document), Ok(signature)) => Request::GetDocumentKey(document, signature), + let args_len = path.len(); + let document = path[0].parse(); + let signature = path[1].parse(); + let threshold 
= (if args_len > 2 { &path[2] } else { "" }).parse(); + match (args_len, method, document, signature, threshold) { + (3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold), + (2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature), _ => Request::Invalid, } } #[cfg(test)] mod tests { - use std::str::FromStr; - use super::super::RequestSignature; + use hyper::method::Method as HttpMethod; use super::{parse_request, Request}; #[test] fn parse_request_successful() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); - assert_eq!(parse_request("/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); + assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), 
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); } #[test] fn parse_request_failed() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); - assert_eq!(parse_request("/a/b"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 32ac48031..553b49bfe 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -14,42 +14,78 @@ // You should have received a copy of the GNU 
General Public License // along with Parity. If not, see . +use std::thread; +use std::sync::Arc; +use std::sync::mpsc; +use futures::{self, Future}; +use parking_lot::Mutex; +use tokio_core::reactor::Core; use ethcrypto; use ethkey; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; +use key_server_cluster::ClusterCore; use traits::KeyServer; -use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey}; +use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, ClusterConfiguration}; +use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; /// Secret store key server implementation -pub struct KeyServerImpl { - acl_storage: T, - key_storage: U, +pub struct KeyServerImpl { + data: Arc>, } -impl KeyServerImpl where T: AclStorage, U: KeyStorage { +/// Secret store key server data. +pub struct KeyServerCore { + close: Option>, + handle: Option>, + cluster: Option>, +} + +impl KeyServerImpl { /// Create new key server instance - pub fn new(acl_storage: T, key_storage: U) -> Self { - KeyServerImpl { - acl_storage: acl_storage, - key_storage: key_storage, - } + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + Ok(KeyServerImpl { + data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)), + }) + } + + #[cfg(test)] + /// Get cluster client reference. 
+ pub fn cluster(&self) -> Arc { + self.data.lock().cluster.clone() + .expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") } } -impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage { +impl KeyServer for KeyServerImpl { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + // recover requestor' public key from signature + let public = ethkey::recover(signature, document) + .map_err(|_| Error::BadSignature)?; + + // generate document key + let data = self.data.lock(); + let encryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_encryption_session(document.clone(), threshold)?; + let document_key = encryption_session.wait()?; + + // encrypt document key with requestor public key + let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; + Ok(document_key) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { // recover requestor' public key from signature let public = ethkey::recover(signature, document) .map_err(|_| Error::BadSignature)?; - // check that requestor has access to the document - if !self.acl_storage.check(&public, document)? 
{ - return Err(Error::AccessDenied); - } + // decrypt document key + let data = self.data.lock(); + let decryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_decryption_session(document.clone(), signature.clone())?; + let document_key = decryption_session.wait()?; - // read unencrypted document key - let document_key = self.key_storage.get(document)?; // encrypt document key with requestor public key let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; @@ -57,68 +93,132 @@ impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage } } +impl KeyServerCore { + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + let config = NetClusterConfiguration { + threads: config.threads, + self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, + listen_address: (config.listener_address.address.clone(), config.listener_address.port), + nodes: config.nodes.iter() + .map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port))) + .collect(), + allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, + encryption_config: config.encryption_config.clone(), + acl_storage: acl_storage, + key_storage: key_storage, + }; + + let (stop, stopped) = futures::oneshot(); + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let mut el = match Core::new() { + Ok(el) => el, + Err(e) => { + tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread."); + return; + }, + }; + + let cluster = ClusterCore::new(el.handle(), config); + let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client())); + tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread."); + let _ = 
el.run(futures::empty().select(stopped)); + }); + let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??; + + Ok(KeyServerCore { + close: Some(stop), + handle: Some(handle), + cluster: Some(cluster), + }) + } +} + +impl Drop for KeyServerCore { + fn drop(&mut self) { + self.close.take().map(|v| v.send(())); + self.handle.take().map(|h| h.join()); + } +} + #[cfg(test)] mod tests { - use std::str::FromStr; + use std::time; + use std::sync::Arc; use ethcrypto; - use ethkey::{self, Secret}; + use ethkey::{self, Random, Generator}; use acl_storage::DummyAclStorage; - use key_storage::KeyStorage; use key_storage::tests::DummyKeyStorage; - use super::super::{Error, RequestSignature, DocumentAddress}; + use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey}; + use super::super::{RequestSignature, DocumentAddress}; use super::{KeyServer, KeyServerImpl}; const DOCUMENT1: &'static str = "0000000000000000000000000000000000000000000000000000000000000001"; - const DOCUMENT2: &'static str = "0000000000000000000000000000000000000000000000000000000000000002"; - const KEY1: &'static str = "key1"; const PRIVATE1: &'static str = "03055e18a8434dcc9061cc1b81c4ef84dc7cf4574d755e52cdcf0c8898b25b11"; - const PUBLIC2: &'static str = "dfe62f56bb05fbd85b485bac749f3410309e24b352bac082468ce151e9ddb94fa7b5b730027fe1c7c5f3d5927621d269f91aceb5caa3c7fe944677a22f88a318"; - const PRIVATE2: &'static str = "0eb3816f4f705fa0fd952fb27b71b8c0606f09f4743b5b65cbc375bd569632f2"; - - fn create_key_server() -> KeyServerImpl { - let acl_storage = DummyAclStorage::default(); - let key_storage = DummyKeyStorage::default(); - key_storage.insert(DOCUMENT1.into(), KEY1.into()).unwrap(); - acl_storage.prohibit(PUBLIC2.into(), DOCUMENT1.into()); - KeyServerImpl::new(acl_storage, key_storage) - } fn make_signature(secret: &str, document: &'static str) -> RequestSignature { - let secret = 
Secret::from_str(secret).unwrap(); + let secret = secret.parse().unwrap(); let document: DocumentAddress = document.into(); ethkey::sign(&secret, &document).unwrap() } - #[test] - fn document_key_succeeds() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); - let document_key = ethcrypto::ecies::decrypt_single_message(&Secret::from_str(PRIVATE1).unwrap(), &document_key); - assert_eq!(document_key, Ok(KEY1.into())); + fn decrypt_document_key(secret: &str, document_key: DocumentEncryptedKey) -> DocumentKey { + let secret = secret.parse().unwrap(); + ethcrypto::ecies::decrypt_single_message(&secret, &document_key).unwrap() } #[test] - fn document_key_fails_when_bad_signature() { - let key_server = create_key_server(); - let signature = RequestSignature::default(); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::BadSignature)); - } + fn document_key_generation_and_retrievement_works_over_network() { + //::util::log::init_log(); - #[test] - fn document_key_fails_when_acl_check_fails() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE2, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::AccessDenied)); - } + let num_nodes = 3; + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_private: (***key_pairs[i].secret()).into(), + listener_address: NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (i as u16), + }, + nodes: key_pairs.iter().enumerate().map(|(j, kp)| (kp.public().clone(), + NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (j as u16), + })).collect(), + allow_connecting_to_higher_nodes: false, + encryption_config: 
EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + }).collect(); + let key_servers: Vec<_> = configs.into_iter().map(|cfg| + KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + ).collect(); - #[test] - fn document_key_fails_when_document_not_found() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT2); - let document_key = key_server.document_key(&signature, &DOCUMENT2.into()); - assert_eq!(document_key, Err(Error::DocumentNotFound)); + // wait until connections are established + let start = time::Instant::now(); + loop { + if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) { + break; + } + if time::Instant::now() - start > time::Duration::from_millis(30000) { + panic!("connections are not established in 30000ms"); + } + } + + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate document key + // TODO: it is an error that we can regenerate key for the same DOCUMENT + let signature = make_signature(PRIVATE1, DOCUMENT1); + let generated_key = key_servers[0].generate_document_key(&signature, &DOCUMENT1.into(), *threshold).unwrap(); + let generated_key = decrypt_document_key(PRIVATE1, generated_key); + + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); + let retrieved_key = decrypt_document_key(PRIVATE1, retrieved_key); + assert_eq!(retrieved_key, generated_key); + } + } } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 5f6c99808..388a79aef 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -14,11 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use key_server_cluster::{Error, NodeId}; -use key_server_cluster::message::Message; +use std::io; +use std::time; +use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::collections::btree_map::Entry; +use std::net::{SocketAddr, IpAddr}; +use futures::{finished, failed, Future, Stream, BoxFuture}; +use futures_cpupool::CpuPool; +use parking_lot::{RwLock, Mutex}; +use tokio_core::io::IoFuture; +use tokio_core::reactor::{Handle, Remote, Timeout, Interval}; +use tokio_core::net::{TcpListener, TcpStream}; +use ethkey::{Secret, KeyPair, Signature, Random, Generator}; +use key_server_cluster::{Error, NodeId, SessionId, EncryptionConfiguration, AclStorage, KeyStorage}; +use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; +use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, DecryptionSessionId, + SessionParams as DecryptionSessionParams, Session as DecryptionSession}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState, + SessionParams as EncryptionSessionParams, Session as EncryptionSession}; +use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; +use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; + +pub type BoxedEmptyFuture = BoxFuture<(), ()>; + +/// Cluster interface for external clients. +pub trait ClusterClient: Send + Sync { + /// Get cluster state. + fn cluster_state(&self) -> ClusterState; + /// Start new encryption session. + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error>; + /// Start new decryption session. 
+ fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error>; +} /// Cluster access for single encryption/decryption participant. -pub trait Cluster { +pub trait Cluster: Send + Sync { /// Broadcast message to all other nodes. fn broadcast(&self, message: Message) -> Result<(), Error>; /// Send message to given node. @@ -27,13 +58,841 @@ pub trait Cluster { fn blacklist(&self, node: &NodeId); } +#[derive(Clone)] +/// Cluster initialization parameters. +pub struct ClusterConfiguration { + /// Number of threads reserved by cluster. + pub threads: usize, + /// Allow connecting to 'higher' nodes. + pub allow_connecting_to_higher_nodes: bool, + /// KeyPair this node holds. + pub self_key_pair: KeyPair, + /// Interface to listen to. + pub listen_address: (String, u16), + /// Cluster nodes. + pub nodes: BTreeMap, + /// Encryption session configuration. + pub encryption_config: EncryptionConfiguration, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, +} + +/// Cluster state. +pub struct ClusterState { + /// Nodes, to which connections are established. + pub connected: BTreeSet, +} + +/// Network cluster implementation. +pub struct ClusterCore { + /// Handle to the event loop. + handle: Handle, + /// Listen address. + listen_address: SocketAddr, + /// Cluster data. + data: Arc, +} + +/// Network cluster client interface implementation. +pub struct ClusterClientImpl { + /// Cluster data. + data: Arc, +} + +/// Network cluster view. It is a communication channel, required in single session. +pub struct ClusterView { + core: Arc>, +} + +/// Cross-thread shareable cluster data. +pub struct ClusterData { + /// Cluster configuration. + config: ClusterConfiguration, + /// Handle to the event loop. + handle: Remote, + /// Handle to the cpu thread pool. + pool: CpuPool, + /// KeyPair this node holds. + self_key_pair: KeyPair, + /// Connections data. 
+ connections: ClusterConnections, + /// Active sessions data. + sessions: ClusterSessions, +} + +/// Connections that are forming the cluster. +pub struct ClusterConnections { + /// Self node id. + pub self_node_id: NodeId, + /// All known other key servers. + pub nodes: BTreeMap, + /// Active connections to key servers. + pub connections: RwLock>>, +} + +/// Active sessions on this cluster. +pub struct ClusterSessions { + /// Self node id. + pub self_node_id: NodeId, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, + /// Active encryption sessions. + pub encryption_sessions: RwLock>, + /// Active decryption sessions. + pub decryption_sessions: RwLock>, +} + +/// Encryption session and its message queue. +pub struct QueuedEncryptionSession { + /// Encryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, EncryptionMessage)>, +} + +/// Decryption session and its message queue. +pub struct QueuedDecryptionSession { + /// Decryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, DecryptionMessage)>, +} + +/// Cluster view core. +struct ClusterViewCore { + /// Cluster reference. + cluster: Arc, + /// Subset of nodes, required for this session. + nodes: BTreeSet, +} + +/// Connection to single node. +pub struct Connection { + /// Node id. + node_id: NodeId, + /// Node address. + node_address: SocketAddr, + /// Is inbound connection? + is_inbound: bool, + /// Tcp stream. + stream: SharedTcpStream, + /// Connection key. + key: Secret, + /// Last message time. 
+ last_message_time: Mutex, +} + +impl ClusterCore { + pub fn new(handle: Handle, config: ClusterConfiguration) -> Result, Error> { + let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?; + let connections = ClusterConnections::new(&config)?; + let sessions = ClusterSessions::new(&config); + let data = ClusterData::new(&handle, config, connections, sessions); + + Ok(Arc::new(ClusterCore { + handle: handle, + listen_address: listen_address, + data: data, + })) + } + + /// Create new client interface. + pub fn client(&self) -> Arc { + Arc::new(ClusterClientImpl::new(self.data.clone())) + } + + #[cfg(test)] + /// Get cluster configuration. + pub fn config(&self) -> &ClusterConfiguration { + &self.data.config + } + + #[cfg(test)] + /// Get connection to given node. + pub fn connection(&self, node: &NodeId) -> Option> { + self.data.connection(node) + } + + /// Run cluster + pub fn run(&self) -> Result<(), Error> { + // try to connect to every other peer + ClusterCore::connect_disconnected_nodes(self.data.clone()); + + // schedule maintain procedures + ClusterCore::schedule_maintain(&self.handle, self.data.clone()); + + // start listening for incoming connections + self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?); + + Ok(()) + } + + /// Connect to peer. + fn connect(data: Arc, node_address: SocketAddr) { + data.handle.clone().spawn(move |handle| { + data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address)) + }) + } + + /// Connect to socket using given context and handle. 
+ fn connect_future(handle: &Handle, data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture {
+ let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
+ net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
+ .then(move |result| ClusterCore::process_connection_result(data, false, result))
+ .then(|_| finished(()))
+ .boxed()
+ }
+
+ /// Start listening for incoming connections.
+ fn listen(handle: &Handle, data: Arc, listen_address: SocketAddr) -> Result {
+ Ok(TcpListener::bind(&listen_address, &handle)?
+ .incoming()
+ .and_then(move |(stream, node_address)| {
+ ClusterCore::accept_connection(data.clone(), stream, node_address);
+ Ok(())
+ })
+ .for_each(|_| Ok(()))
+ .then(|_| finished(()))
+ .boxed())
+ }
+
+ /// Accept connection.
+ fn accept_connection(data: Arc, stream: TcpStream, node_address: SocketAddr) {
+ data.handle.clone().spawn(move |handle| {
+ data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address))
+ })
+ }
+
+ /// Accept connection future.
+ fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture {
+ let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
+ net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes)
+ .then(move |result| ClusterCore::process_connection_result(data, true, result))
+ .then(|_| finished(()))
+ .boxed()
+ }
+
+ /// Schedule maintain procedures.
+ fn schedule_maintain(handle: &Handle, data: Arc) {
+ // TODO: per-session timeouts (node can respond to messages, but ignore sessions messages)
+ let (d1, d2, d3) = (data.clone(), data.clone(), data.clone());
+ let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(10, 0), handle)
+ .expect("failed to create interval")
+ .and_then(move |_| Ok(trace!(target: "secretstore_net", "{}: executing maintain procedures", d1.self_key_pair.public())))
+ .and_then(move |_| Ok(ClusterCore::keep_alive(d2.clone())))
+ .and_then(move |_| Ok(ClusterCore::connect_disconnected_nodes(d3.clone())))
+ .for_each(|_| Ok(()))
+ .then(|_| finished(()))
+ .boxed();
+
+ data.spawn(interval);
+ }
+
+ /// Called for every incoming message.
+ fn process_connection_messages(data: Arc, connection: Arc) -> IoFuture> {
+ connection
+ .read_message()
+ .then(move |result|
+ match result {
+ Ok((_, Ok(message))) => {
+ ClusterCore::process_connection_message(data.clone(), connection.clone(), message);
+ // continue serving connection
+ data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
+ finished(Ok(())).boxed()
+ },
+ Ok((_, Err(err))) => {
+ warn!(target: "secretstore_net", "{}: protocol error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
+ // continue serving connection
+ data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
+ finished(Err(err)).boxed()
+ },
+ Err(err) => {
+ warn!(target: "secretstore_net", "{}: network error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
+ // close connection
+ data.connections.remove(connection.node_id(), connection.is_inbound());
+ failed(err).boxed()
+ },
+ }
+ ).boxed()
+ }
+
+ /// Send keepalive messages to every other node.
+ fn keep_alive(data: Arc) { + for connection in data.connections.active_connections() { + let last_message_diff = time::Instant::now() - connection.last_message_time(); + if last_message_diff > time::Duration::from_secs(60) { + data.connections.remove(connection.node_id(), connection.is_inbound()); + data.sessions.on_connection_timeout(connection.node_id()); + } + else if last_message_diff > time::Duration::from_secs(30) { + data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})))); + } + } + } + + /// Try to connect to every disconnected node. + fn connect_disconnected_nodes(data: Arc) { + for (node_id, node_address) in data.connections.disconnected_nodes() { + if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { + ClusterCore::connect(data.clone(), node_address); + } + } + } + + /// Process connection future result. + fn process_connection_result(data: Arc, is_inbound: bool, result: Result>, io::Error>) -> IoFuture> { + match result { + Ok(DeadlineStatus::Meet(Ok(connection))) => { + let connection = Connection::new(is_inbound, connection); + if data.connections.insert(connection.clone()) { + ClusterCore::process_connection_messages(data.clone(), connection) + } else { + finished(Ok(())).boxed() + } + }, + Ok(DeadlineStatus::Meet(Err(_))) => { + finished(Ok(())).boxed() + }, + Ok(DeadlineStatus::Timeout) => { + finished(Ok(())).boxed() + }, + Err(_) => { + // network error + finished(Ok(())).boxed() + }, + } + } + + /// Process single message from the connection. 
+ fn process_connection_message(data: Arc, connection: Arc, message: Message) { + connection.set_last_message_time(time::Instant::now()); + trace!(target: "secretstore_net", "{}: processing message {} from {}", data.self_key_pair.public(), message, connection.node_id()); + match message { + Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message), + Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message), + Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), + } + } + + /// Process single encryption message from the connection. + fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let key_check_timeout_ms = data.config.encryption_config.key_check_timeout_ms; + loop { + let result = match message { + EncryptionMessage::InitializeSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + let session_id: SessionId = message.session.clone().into(); + data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + EncryptionMessage::ConfirmInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + EncryptionMessage::CompleteInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complete_initialization(sender.clone(), message)), + EncryptionMessage::KeysDissemination(ref message) => 
data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + // TODO: move this logic to session (or session connector) + let is_in_key_check_state = s.state() == EncryptionSessionState::KeyCheck; + let result = s.on_keys_dissemination(sender.clone(), message); + if !is_in_key_check_state && s.state() == EncryptionSessionState::KeyCheck { + let session = s.clone(); + let d = data.clone(); + data.handle.spawn(move |handle| + Timeout::new(time::Duration::new(key_check_timeout_ms / 1000, 0), handle) + .expect("failed to create timeout") + .and_then(move |_| { + if let Err(error) = session.start_key_generation_phase() { + session.on_session_error(d.self_key_pair.public().clone(), &message::SessionError { + session: session.id().clone().into(), + error: error.into(), + }); + } + Ok(()) + }) + .then(|_| finished(())) + ); + } + + result + }), + EncryptionMessage::Complaint(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint(sender.clone(), message)), + EncryptionMessage::ComplaintResponse(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint_response(sender.clone(), message)), + EncryptionMessage::PublicKeyShare(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_public_key_share(sender.clone(), message)), + EncryptionMessage::SessionError(ref message) => { + if let Some(s) = data.sessions.encryption_session(&*message.session) { + data.sessions.remove_encryption_session(s.id()); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + EncryptionMessage::SessionCompleted(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + let result = s.on_session_completed(sender.clone(), message); + if result.is_ok() && s.state() == 
EncryptionSessionState::Finished { + data.sessions.remove_encryption_session(s.id()); + } + + result + }), + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_encryption_message(&session_id, sender, message, is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Encryption(EncryptionMessage::SessionError(message::SessionError { + session: session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_encryption_session(&session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_encryption_message(&session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single decryption message from the connection. 
+ fn process_decryption_message(data: Arc, connection: Arc, mut message: DecryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let sub_session_id = message.sub_session_id().clone(); + loop { + let result = match message { + DecryptionMessage::InitializeDecryptionSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + DecryptionMessage::ConfirmDecryptionInitialization(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + DecryptionMessage::RequestPartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption_requested(sender.clone(), message)), + DecryptionMessage::PartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption(sender.clone(), message)), + DecryptionMessage::DecryptionSessionError(ref message) => { + if let Some(s) = data.sessions.decryption_session(&*message.session, &*message.sub_session) { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message); + break; + }, + Err(err) => 
{ + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Decryption(DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError { + session: session_id.clone().into(), + sub_session: sub_session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single cluster message from the connection. + fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { + match message { + ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {})))), + ClusterMessage::KeepAliveResponse(_) => (), + _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()), + } + } +} + +impl ClusterConnections { + pub fn new(config: &ClusterConfiguration) -> Result { + let mut connections = ClusterConnections { + self_node_id: config.self_key_pair.public().clone(), + nodes: BTreeMap::new(), + connections: RwLock::new(BTreeMap::new()), + }; + + for (node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { + let socket_address = make_socket_address(&node_addr, node_port)?; + connections.nodes.insert(node_id.clone(), socket_address); + } + + Ok(connections) + } + + pub fn cluster_state(&self) -> ClusterState { + ClusterState { + connected: self.connections.read().keys().cloned().collect(), + } + } + + pub fn get(&self, node: &NodeId) -> Option> { + 
self.connections.read().get(node).cloned() + } + + pub fn insert(&self, connection: Arc) -> bool { + let mut connections = self.connections.write(); + if connections.contains_key(connection.node_id()) { + // we have already connected to the same node + // the agreement is that node with lower id must establish connection to node with higher id + if (&self.self_node_id < connection.node_id() && connection.is_inbound()) + || (&self.self_node_id > connection.node_id() && !connection.is_inbound()) { + return false; + } + } + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + connections.insert(connection.node_id().clone(), connection); + true + } + + pub fn remove(&self, node: &NodeId, is_inbound: bool) { + let mut connections = self.connections.write(); + if let Entry::Occupied(entry) = connections.entry(node.clone()) { + if entry.get().is_inbound() != is_inbound { + return; + } + + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove_entry(); + } + } + + pub fn connected_nodes(&self) -> BTreeSet { + self.connections.read().keys().cloned().collect() + } + + pub fn active_connections(&self)-> Vec> { + self.connections.read().values().cloned().collect() + } + + pub fn disconnected_nodes(&self) -> BTreeMap { + let connections = self.connections.read(); + self.nodes.iter() + .filter(|&(node_id, _)| !connections.contains_key(node_id)) + .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) + .collect() + } +} + +impl ClusterSessions { + pub fn new(config: &ClusterConfiguration) -> Self { + ClusterSessions { + self_node_id: config.self_key_pair.public().clone(), + acl_storage: config.acl_storage.clone(), + key_storage: config.key_storage.clone(), + encryption_sessions: RwLock::new(BTreeMap::new()), + decryption_sessions: RwLock::new(BTreeMap::new()), + } + } + 
+ pub fn new_encryption_session(&self, _master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + let mut encryption_sessions = self.encryption_sessions.write(); + if encryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { + id: session_id.clone(), + self_node_id: self.self_node_id.clone(), + key_storage: self.key_storage.clone(), + cluster: cluster, + })); + let encryption_session = QueuedEncryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + encryption_sessions.insert(session_id, encryption_session); + Ok(session) + } + + pub fn remove_encryption_session(&self, session_id: &SessionId) { + self.encryption_sessions.write().remove(session_id); + } + + pub fn encryption_session(&self, session_id: &SessionId) -> Option> { + self.encryption_sessions.read().get(session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) { + self.encryption_sessions.write().get_mut(session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> { + self.encryption_sessions.write().get_mut(session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn new_decryption_session(&self, _master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc) -> Result, Error> { + let mut decryption_sessions = self.decryption_sessions.write(); + let session_id = DecryptionSessionId::new(session_id, sub_session_id); + if decryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { + id: 
session_id.id.clone(), + access_key: session_id.access_key.clone(), + self_node_id: self.self_node_id.clone(), + encrypted_data: self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?, + acl_storage: self.acl_storage.clone(), + cluster: cluster, + })?); + let decryption_session = QueuedDecryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + decryption_sessions.insert(session_id, decryption_session); + Ok(session) + } + + pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().remove(&session_id); + } + + pub fn decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn on_connection_timeout(&self, node_id: &NodeId) { + for encryption_session in self.encryption_sessions.read().values() { + encryption_session.session.on_session_timeout(node_id); + } + for decryption_session in 
self.decryption_sessions.read().values() { + decryption_session.session.on_session_timeout(node_id); + } + } +} + +impl ClusterData { + pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc { + Arc::new(ClusterData { + handle: handle.remote().clone(), + pool: CpuPool::new(config.threads), + self_key_pair: config.self_key_pair.clone(), + connections: connections, + sessions: sessions, + config: config, + }) + } + + /// Get connection to given node. + pub fn connection(&self, node: &NodeId) -> Option> { + self.connections.get(node) + } + + /// Spawns a future using thread pool and schedules execution of it with event loop handle. + pub fn spawn(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static { + let pool_work = self.pool.spawn(f); + self.handle.spawn(move |_handle| { + pool_work.then(|_| finished(())) + }) + } +} + +impl Connection { + pub fn new(is_inbound: bool, connection: NetConnection) -> Arc { + Arc::new(Connection { + node_id: connection.node_id, + node_address: connection.address, + is_inbound: is_inbound, + stream: connection.stream, + key: connection.key, + last_message_time: Mutex::new(time::Instant::now()), + }) + } + + pub fn is_inbound(&self) -> bool { + self.is_inbound + } + + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + pub fn last_message_time(&self) -> time::Instant { + *self.last_message_time.lock() + } + + pub fn set_last_message_time(&self, last_message_time: time::Instant) { + *self.last_message_time.lock() = last_message_time; + } + + pub fn node_address(&self) -> &SocketAddr { + &self.node_address + } + + pub fn send_message(&self, message: Message) -> WriteMessage { + write_encrypted_message(self.stream.clone(), &self.key, message) + } + + pub fn read_message(&self) -> ReadMessage { + read_encrypted_message(self.stream.clone(), self.key.clone()) + } +} + +impl ClusterView { + pub fn new(cluster: Arc, nodes: 
BTreeSet) -> Self { + ClusterView { + core: Arc::new(Mutex::new(ClusterViewCore { + cluster: cluster, + nodes: nodes, + })), + } + } +} + +impl Cluster for ClusterView { + fn broadcast(&self, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) { + let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message.clone())) + } + Ok(()) + } + + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message)); + Ok(()) + } + + fn blacklist(&self, _node: &NodeId) { + // TODO: unimplemented!() + } +} + +impl ClusterClientImpl { + pub fn new(data: Arc) -> Self { + ClusterClientImpl { + data: data, + } + } +} + +impl ClusterClient for ClusterClientImpl { + fn cluster_state(&self) -> ClusterState { + self.data.connections.cluster_state() + } + + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + session.initialize(threshold, connected_nodes)?; + Ok(session) + } + + fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let access_key = Random.generate()?.secret().clone(); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); 
+ let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key, cluster)?; + session.initialize(requestor_signature)?; + Ok(session) + } +} + +fn make_socket_address(address: &str, port: u16) -> Result { + let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; + Ok(SocketAddr::new(ip_address, port)) +} + #[cfg(test)] pub mod tests { + use std::sync::Arc; + use std::time; use std::collections::VecDeque; use parking_lot::Mutex; - use key_server_cluster::{NodeId, Error}; + use tokio_core::reactor::Core; + use ethkey::{Random, Generator}; + use key_server_cluster::{NodeId, Error, EncryptionConfiguration, DummyAclStorage, DummyKeyStorage}; use key_server_cluster::message::Message; - use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; #[derive(Debug)] pub struct DummyCluster { @@ -87,4 +946,61 @@ pub mod tests { fn blacklist(&self, _node: &NodeId) { } } + + pub fn loop_until(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool { + let start = time::Instant::now(); + loop { + core.turn(Some(time::Duration::from_millis(1))); + if predicate() { + break; + } + + if time::Instant::now() - start > timeout { + panic!("no result in {:?}", timeout); + } + } + } + + pub fn all_connections_established(cluster: &Arc) -> bool { + cluster.config().nodes.keys() + .filter(|p| *p != cluster.config().self_key_pair.public()) + .all(|p| cluster.connection(p).is_some()) + } + + pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec> { + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_key_pair: key_pairs[i].clone(), + listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), + nodes: key_pairs.iter().enumerate() + .map(|(j, kp)| (kp.public().clone(), 
("127.0.0.1".into(), ports_begin + j as u16))) + .collect(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + key_storage: Arc::new(DummyKeyStorage::default()), + acl_storage: Arc::new(DummyAclStorage::default()), + }).collect(); + let clusters: Vec<_> = cluster_params.into_iter().enumerate() + .map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap()) + .collect(); + + clusters + } + + pub fn run_clusters(clusters: &[Arc]) { + for cluster in clusters { + cluster.run().unwrap(); + } + } + + #[test] + fn cluster_connects_to_other_nodes() { + let mut core = Core::new().unwrap(); + let clusters = make_clusters(&core, 6010, 3); + run_clusters(&clusters); + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + } } diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index d4160851e..71d8ad26f 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -14,15 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::cmp::{Ord, PartialOrd, Ordering}; use std::collections::{BTreeSet, BTreeMap}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Mutex, Condvar}; use ethkey::{self, Secret, Public, Signature}; -use key_server_cluster::{Error, AclStorage, EncryptedData, NodeId, SessionId}; +use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId}; use key_server_cluster::cluster::Cluster; use key_server_cluster::math; -use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmDecryptionInitialization, - RequestPartialDecryption, PartialDecryption}; +use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization, + RequestPartialDecryption, PartialDecryption, DecryptionSessionError}; + +/// Decryption session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. Returns distributely restored secret key. + fn wait(&self) -> Result; +} /// Distributed decryption session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -32,7 +39,7 @@ use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmD /// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document /// 3) partial decryption: every node which has succussfully checked access for the requestor do a partial decryption /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret -pub struct Session { +pub struct SessionImpl { /// Encryption session id. id: SessionId, /// Decryption session access key. @@ -40,25 +47,36 @@ pub struct Session { /// Public identifier of this node. self_node_id: NodeId, /// Encrypted data. - encrypted_data: EncryptedData, + encrypted_data: DocumentKeyShare, /// ACL storate to check access to the resource. 
acl_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } -/// Session creation parameters -pub struct SessionParams { - /// Session identifier. +/// Decryption session Id. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DecryptionSessionId { + /// Encryption session id. pub id: SessionId, - /// Session access key. + /// Decryption session access key. + pub access_key: Secret, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// SessionImpl access key. pub access_key: Secret, /// Id of node, on which this session is running. pub self_node_id: Public, - /// Encrypted data (result of running encryption_session::Session). - pub encrypted_data: EncryptedData, + /// Encrypted data (result of running encryption_session::SessionImpl). + pub encrypted_data: DocumentKeyShare, /// ACL storage. pub acl_storage: Arc, /// Cluster @@ -91,16 +109,11 @@ struct SessionData { /// === Values, filled during final decryption === /// Decrypted secret - decrypted_secret: Option, -} - -#[derive(Debug)] -struct NodeData { - /// Node-generated shadow point. - shadow_point: Option, + decrypted_secret: Option>, } #[derive(Debug, Clone, PartialEq)] +/// Decryption session data. pub enum SessionState { /// Every node starts in this state. WaitingForInitialization, @@ -116,18 +129,19 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new decryption session. 
pub fn new(params: SessionParams) -> Result { check_encrypted_data(¶ms.self_node_id, ¶ms.encrypted_data)?; - Ok(Session { + Ok(SessionImpl { id: params.id, access_key: params.access_key, self_node_id: params.self_node_id, encrypted_data: params.encrypted_data, acl_storage: params.acl_storage, cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -146,19 +160,22 @@ impl Session { &self.self_node_id } + #[cfg(test)] /// Get this session access key. pub fn access_key(&self) -> &Secret { &self.access_key } + #[cfg(test)] /// Get current session state. pub fn state(&self) -> SessionState { self.data.lock().state.clone() } + #[cfg(test)] /// Get decrypted secret pub fn decrypted_secret(&self) -> Option { - self.data.lock().decrypted_secret.clone() + self.data.lock().decrypted_secret.clone().and_then(|r| r.ok()) } /// Initialize decryption session. @@ -188,15 +205,20 @@ impl Session { // not enough nodes => pass initialization message to all other nodes SessionState::WaitingForInitializationConfirm => { for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) { - self.cluster.send(node, Message::InitializeDecryptionSession(InitializeDecryptionSession { - session: self.id.clone(), - sub_session: self.access_key.clone(), - requestor_signature: requestor_signature.clone(), - }))?; + self.cluster.send(node, Message::Decryption(DecryptionMessage::InitializeDecryptionSession(InitializeDecryptionSession { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + requestor_signature: requestor_signature.clone().into(), + })))?; } }, // we can decrypt data on our own - SessionState::WaitingForPartialDecryption => unimplemented!(), + SessionState::WaitingForPartialDecryption => { + data.confirmed_nodes.insert(self.node().clone()); + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), 
self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data)?; + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + }, // we can not decrypt data SessionState::Failed => (), // cannot reach other states @@ -207,9 +229,9 @@ impl Session { } /// When session initialization message is received. - pub fn on_initialize_session(&self, sender: NodeId, message: InitializeDecryptionSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeDecryptionSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -230,17 +252,17 @@ impl Session { // respond to master node data.master = Some(sender.clone()); - self.cluster.send(&sender, Message::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { - session: self.id.clone(), - sub_session: self.access_key.clone(), + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), is_confirmed: is_requestor_allowed_to_read, - })) + }))) } /// When session initialization confirmation message is reeived. 
- pub fn on_confirm_initialization(&self, sender: NodeId, message: ConfirmDecryptionInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmDecryptionInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -260,26 +282,8 @@ impl Session { // we do not yet have enough nodes for decryption SessionState::WaitingForInitializationConfirm => Ok(()), // we have enough nodes for decryption - SessionState::WaitingForPartialDecryption => { - let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); - for node in data.confirmed_nodes.iter().filter(|n| n != &self.node()) { - self.cluster.send(node, Message::RequestPartialDecryption(RequestPartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - nodes: confirmed_nodes.clone(), - }))?; - } - - assert!(data.confirmed_nodes.remove(self.node())); - - let shadow_point = { - let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &data.confirmed_nodes, &self.access_key, &self.encrypted_data)? - }; - data.shadow_points.insert(self.node().clone(), shadow_point); - - Ok(()) - }, + SessionState::WaitingForPartialDecryption => + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data), // we can not have enough nodes for decryption SessionState::Failed => Ok(()), // cannot reach other states @@ -288,9 +292,9 @@ impl Session { } /// When partial decryption is requested. 
- pub fn on_partial_decryption_requested(&self, sender: NodeId, message: RequestPartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption_requested(&self, sender: NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); // check message @@ -311,13 +315,13 @@ impl Session { // calculate shadow point let shadow_point = { let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &message.nodes, &self.access_key, &self.encrypted_data)? + do_partial_decryption(self.node(), &requestor, &message.nodes.iter().cloned().map(Into::into).collect(), &self.access_key, &self.encrypted_data)? }; - self.cluster.send(&sender, Message::PartialDecryption(PartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - shadow_point: shadow_point, - }))?; + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + shadow_point: shadow_point.into(), + })))?; // update sate data.state = SessionState::Finished; @@ -326,9 +330,9 @@ impl Session { } /// When partial decryption is received. 
- pub fn on_partial_decryption(&self, sender: NodeId, message: PartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption(&self, sender: NodeId, message: &PartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -341,24 +345,113 @@ impl Session { if !data.confirmed_nodes.remove(&sender) { return Err(Error::InvalidStateForRequest); } - data.shadow_points.insert(sender, message.shadow_point); + data.shadow_points.insert(sender, message.shadow_point.clone().into()); // check if we have enough shadow points to decrypt the secret if data.shadow_points.len() != self.encrypted_data.threshold + 1 { return Ok(()); } + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: NodeId, message: &DecryptionSessionError) { + warn!("{}: decryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occured. 
+ pub fn on_session_timeout(&self, _node: &NodeId) { + warn!("{}: decryption session timeout", self.node()); + let mut data = self.data.lock(); + // TODO: check that node is a part of decryption process + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + + fn start_waiting_for_partial_decryption(self_node_id: NodeId, session_id: SessionId, access_key: Secret, cluster: &Arc, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { + let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); + for node in data.confirmed_nodes.iter().filter(|n| n != &&self_node_id) { + cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { + session: session_id.clone().into(), + sub_session: access_key.clone().into(), + nodes: confirmed_nodes.iter().cloned().map(Into::into).collect(), + })))?; + } + + assert!(data.confirmed_nodes.remove(&self_node_id)); + + let shadow_point = { + let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); + do_partial_decryption(&self_node_id, &requestor, &data.confirmed_nodes, &access_key, &encrypted_data)? 
+ }; + data.shadow_points.insert(self_node_id.clone(), shadow_point); + + Ok(()) + } + + fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { // decrypt the secret using shadow points let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?; - let decrypted_secret = math::decrypt_with_joint_shadow(&self.access_key, &self.encrypted_data.encrypted_point, &joint_shadow_point)?; - data.decrypted_secret = Some(decrypted_secret); + let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?; + data.decrypted_secret = Some(Ok(decrypted_secret)); + + // switch to completed state data.state = SessionState::Finished; Ok(()) } } -fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) -> Result<(), Error> { +impl Session for SessionImpl { + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.decrypted_secret.is_some() { + self.completed.wait(&mut data); + } + + data.decrypted_secret.as_ref() + .expect("checked above or waited for completed; completed is only signaled when decrypted_secret.is_some(); qed") + .clone() + } +} + +impl DecryptionSessionId { + /// Create new decryption session Id. 
+ pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { + DecryptionSessionId { + id: session_id, + access_key: sub_session_id, + } + } +} + +impl PartialOrd for DecryptionSessionId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + + +impl Ord for DecryptionSessionId { + fn cmp(&self, other: &Self) -> Ordering { + match self.id.cmp(&other.id) { + Ordering::Equal => self.access_key.cmp(&other.access_key), + r @ _ => r, + } + } +} + + +fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> { use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold}; let nodes = encrypted_data.id_numbers.keys().cloned().collect(); @@ -368,7 +461,7 @@ fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) - Ok(()) } -fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { +fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { if !data.requested_nodes.remove(node) { return Err(Error::InvalidMessage); } @@ -395,7 +488,7 @@ fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut Se Ok(()) } -fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &EncryptedData) -> Result { +fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result { let node_id_number = &encrypted_data.id_numbers[node]; let node_secret_share = &encrypted_data.secret_share; let other_id_numbers = participants.iter() @@ -409,43 +502,42 @@ fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants #[cfg(test)] mod tests { use std::sync::Arc; - use std::str::FromStr; use 
std::collections::BTreeMap; use super::super::super::acl_storage::DummyAclStorage; use ethkey::{self, Random, Generator, Public, Secret}; - use key_server_cluster::{NodeId, EncryptedData, SessionId, Error}; + use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error}; use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::decryption_session::{Session, SessionParams, SessionState}; - use key_server_cluster::message::{self, Message}; + use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState}; + use key_server_cluster::message::{self, Message, DecryptionMessage}; const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { + fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { // prepare encrypted data + cluster configuration for scheme 4-of-5 let session_id = SessionId::default(); let access_key = Random.generate().unwrap().secret().clone(); - let secret_shares = vec![ - Secret::from_str("834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec").unwrap(), - Secret::from_str("5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b").unwrap(), - Secret::from_str("71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9").unwrap(), - Secret::from_str("80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4").unwrap(), - Secret::from_str("c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad").unwrap(), + let secret_shares: Vec = vec![ + "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), + "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b".parse().unwrap(), + "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9".parse().unwrap(), + "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4".parse().unwrap(), + 
"c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad".parse().unwrap(), ]; let id_numbers: Vec<(NodeId, Secret)> = vec![ ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), - Secret::from_str("281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c").unwrap()), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()), ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), - Secret::from_str("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b").unwrap()), + "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse().unwrap()), ("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), - Secret::from_str("f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62").unwrap()), + "f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62".parse().unwrap()), ("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), - Secret::from_str("5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f").unwrap()), + "5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f".parse().unwrap()), ("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), - Secret::from_str("12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8").unwrap()), + "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()), ]; let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); let encrypted_point: Public = 
"b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); - let encrypted_datas: Vec<_> = (0..5).map(|i| EncryptedData { + let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { threshold: 3, id_numbers: id_numbers.clone().into_iter().collect(), secret_share: secret_shares[i].clone(), @@ -454,7 +546,7 @@ mod tests { }).collect(); let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); let clusters: Vec<_> = (0..5).map(|i| Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0))).collect(); - let sessions: Vec<_> = (0..5).map(|i| Session::new(SessionParams { + let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams { id: session_id.clone(), access_key: access_key.clone(), self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, @@ -466,11 +558,11 @@ mod tests { (clusters, acl_storages, sessions) } - fn do_messages_exchange(clusters: &[Arc], sessions: &[Session]) { + fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) { do_messages_exchange_until(clusters, sessions, |_, _, _| false); } - fn do_messages_exchange_until(clusters: &[Arc], sessions: &[Session], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { + fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; if cond(&from, &to, &message) { @@ -478,10 +570,10 @@ mod tests { } match message { - Message::InitializeDecryptionSession(message) => session.on_initialize_session(from, message).unwrap(), - Message::ConfirmDecryptionInitialization(message) => session.on_confirm_initialization(from, message).unwrap(), - 
Message::RequestPartialDecryption(message) => session.on_partial_decryption_requested(from, message).unwrap(), - Message::PartialDecryption(message) => session.on_partial_decryption(from, message).unwrap(), + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(message)) => session.on_initialize_session(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(message)) => session.on_confirm_initialization(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(message)) => session.on_partial_decryption_requested(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::PartialDecryption(message)) => session.on_partial_decryption(from, &message).unwrap(), _ => panic!("unexpected"), } } @@ -492,11 +584,11 @@ mod tests { let mut nodes = BTreeMap::new(); let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -517,11 +609,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: 
Random.generate().unwrap().secret().clone(), @@ -542,11 +634,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -572,70 +664,70 @@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (_, _, sessions) = prepare_decryption_sessions(); assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: 
ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_requested_by_slave() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: 
ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: 
sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(2).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), message::PartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - shadow_point: Random.generate().unwrap().public().clone(), + assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), &message::PartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + shadow_point: Random.generate().unwrap().public().clone().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -647,7 +739,7 @@ mod tests { let mut pd_from = None; let mut pd_msg = None; do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { - &Message::PartialDecryption(ref msg) => { + &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { pd_from = Some(from.clone()); pd_msg = Some(msg.clone()); true @@ -655,8 +747,8 @@ mod tests { _ => false, }); - assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), pd_msg.clone().unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); + assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); + assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); } 
#[test] @@ -704,4 +796,9 @@ mod tests { // 3) 0 sessions have decrypted key value assert!(sessions.iter().all(|s| s.decrypted_secret().is_none())); } + + #[test] + fn decryption_session_works_over_network() { + // TODO + } } diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index 6f5705a73..beca00443 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -17,13 +17,22 @@ use std::collections::{BTreeSet, BTreeMap, VecDeque}; use std::fmt::{Debug, Formatter, Error as FmtError}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Condvar, Mutex}; use ethkey::{Public, Secret}; -use key_server_cluster::{Error, NodeId, SessionId}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::message::{Message, InitializeSession, ConfirmInitialization, CompleteInitialization, - KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare}; +use key_server_cluster::message::{Message, EncryptionMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, + KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare, SessionError, SessionCompleted}; + +/// Encryption session API. +pub trait Session: Send + Sync + 'static { + #[cfg(test)] + /// Get joint public key (if it is known). + fn joint_public_key(&self) -> Option; + /// Wait until session is completed. Returns distributely generated secret key. + fn wait(&self) -> Result; +} /// Encryption (distributed key generation) session. 
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -34,17 +43,34 @@ use key_server_cluster::message::{Message, InitializeSession, ConfirmInitializat /// 3) key verification (KV): all nodes are checking values, received for other nodes and complaining if keys are wrong /// 4) key check phase (KC): nodes are processing complaints, received from another nodes /// 5) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key -pub struct Session { +/// 6) encryption phase: master node generates secret key, encrypts it using joint public && broadcasts encryption result +pub struct SessionImpl { /// Unique session id. id: SessionId, /// Public identifier of this node. self_node_id: NodeId, + /// Key storage. + key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Key storage. + pub key_storage: Arc, + /// Cluster + pub cluster: Arc, +} + #[derive(Debug)] /// Mutable data of encryption (distributed key generation) session. struct SessionData { @@ -74,7 +100,9 @@ struct SessionData { /// === Values, filled when DKG session is completed successfully === /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public: Option, + joint_public: Option>, + /// Secret point. + secret_point: Option>, } #[derive(Debug, Clone)] @@ -95,13 +123,17 @@ struct NodeData { /// Public values, which have been received from this node. pub publics: Option>, - /// === Values, filled during KC phase === + // === Values, filled during KC phase === /// Nodes, complaining against this node. 
pub complaints: BTreeSet, - /// === Values, filled during KG phase === + // === Values, filled during KG phase === /// Public share, which has been received from this node. pub public_share: Option, + + // === Values, filled during encryption phase === + /// Flags marking that node has confirmed session completion (encryption data is stored). + pub completion_confirmed: bool, } #[derive(Debug, Clone, PartialEq)] @@ -139,6 +171,10 @@ pub enum SessionState { /// Node is waiting for joint public key share to be received from every other node. WaitingForPublicKeyShare, + // === Encryption phase states === + /// Node is waiting for session completion/session completion confirmation. + WaitingForEncryptionConfirmation, + // === Final states of the session === /// Joint public key generation is completed. Finished, @@ -146,13 +182,15 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new encryption session. - pub fn new(id: SessionId, self_node_id: Public, cluster: Arc) -> Self { - Session { - id: id, - self_node_id: self_node_id, - cluster: cluster, + pub fn new(params: SessionParams) -> Self { + SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + key_storage: params.key_storage, + cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -162,10 +200,16 @@ impl Session { secret_coeff: None, secret_share: None, joint_public: None, + secret_point: None, }), } } + /// Get this session Id. + pub fn id(&self) -> &SessionId { + &self.id + } + /// Get this node Id. pub fn node(&self) -> &NodeId { &self.self_node_id @@ -176,11 +220,6 @@ impl Session { self.data.lock().state.clone() } - /// Get joint public key. - pub fn joint_public_key(&self) -> Option { - self.data.lock().joint_public.clone() - } - #[cfg(test)] /// Get derived point. 
pub fn derived_point(&self) -> Option { @@ -220,15 +259,15 @@ impl Session { // start initialization let derived_point = math::generate_random_point()?; - self.cluster.send(&next_node, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: derived_point, - })) + self.cluster.send(&next_node, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: derived_point.into(), + }))) } /// When session initialization message is received. - pub fn on_initialize_session(&self, sender: NodeId, mut message: InitializeSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -239,13 +278,14 @@ impl Session { } // update derived point with random scalar - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // send confirmation back to master node - self.cluster.send(&sender, Message::ConfirmInitialization(ConfirmInitialization { - session: self.id.clone(), - derived_point: message.derived_point, - }))?; + self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmInitialization(ConfirmInitialization { + session: self.id.clone().into(), + derived_point: derived_point.into(), + })))?; // update state data.master = Some(sender); @@ -255,8 +295,8 @@ impl Session { } /// When session initialization confirmation message is reeived. 
- pub fn on_confirm_initialization(&self, sender: NodeId, mut message: ConfirmInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -277,25 +317,26 @@ impl Session { // proceed message match next_receiver { Some(next_receiver) => { - return self.cluster.send(&next_receiver, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: message.derived_point, - })); + return self.cluster.send(&next_receiver, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: message.derived_point.clone().into(), + }))); }, None => { // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // remember derived point - data.derived_point = Some(message.derived_point.clone()); + data.derived_point = Some(derived_point.clone().into()); // broadcast derived point && other session paraeters to every other node - self.cluster.broadcast(Message::CompleteInitialization(CompleteInitialization { - session: self.id.clone(), - nodes: data.nodes.iter().map(|(id, data)| (id.clone(), data.id_number.clone())).collect(), + self.cluster.broadcast(Message::Encryption(EncryptionMessage::CompleteInitialization(CompleteInitialization { + session: self.id.clone().into(), + nodes: data.nodes.iter().map(|(id, data)| (id.clone().into(), data.id_number.clone().into())).collect(), threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: message.derived_point, - 
}))?; + derived_point: derived_point.into(), + })))?; }, } @@ -305,12 +346,12 @@ impl Session { } /// When session initialization completion message is received. - pub fn on_complete_initialization(&self, sender: NodeId, message: CompleteInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); // check message - let nodes_ids = message.nodes.keys().cloned().collect(); + let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); check_cluster_nodes(self.node(), &nodes_ids)?; check_threshold(message.threshold, &nodes_ids)?; @@ -326,8 +367,8 @@ impl Session { // remember passed data data.threshold = Some(message.threshold); - data.derived_point = Some(message.derived_point); - data.nodes = message.nodes.into_iter().map(|(id, number)| (id, NodeData::with_id_number(number))).collect(); + data.derived_point = Some(message.derived_point.clone().into()); + data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); // now it is time for keys dissemination (KD) phase drop(data); @@ -335,17 +376,20 @@ impl Session { } /// When keys dissemination message is received. 
- pub fn on_keys_dissemination(&self, sender: NodeId, message: KeysDissemination) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); // check state if data.state != SessionState::WaitingForKeysDissemination { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } + debug_assert!(data.nodes.contains_key(&sender)); // check message let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); @@ -360,9 +404,9 @@ impl Session { return Err(Error::InvalidStateForRequest); } - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); - node_data.publics = Some(message.publics); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); + node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); } // check if we have received keys from every other node @@ -382,10 +426,10 @@ impl Session { if !is_key_verification_ok { node_data.complaints.insert(self.node().clone()); - self.cluster.broadcast(Message::Complaint(Complaint { - session: self.id.clone(), - against: node_id.clone(), - }))?; + self.cluster.broadcast(Message::Encryption(EncryptionMessage::Complaint(Complaint { + session: self.id.clone().into(), + against: node_id.clone().into(), + })))?; } } @@ -396,8 +440,8 @@ impl Session { } /// When complaint is received. 
- pub fn on_complaint(&self, sender: NodeId, message: Complaint) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint(&self, sender: NodeId, message: &Complaint) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -412,16 +456,16 @@ impl Session { } // respond to complaint - if &message.against == self.node() { + if &*message.against == self.node() { let secret1_sent = data.nodes[&sender].secret1_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); let secret2_sent = data.nodes[&sender].secret2_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); // someone is complaining against us => let's respond - return self.cluster.broadcast(Message::ComplaintResponse(ComplaintResponse { - session: self.id.clone(), - secret1: secret1_sent, - secret2: secret2_sent, - })); + return self.cluster.broadcast(Message::Encryption(EncryptionMessage::ComplaintResponse(ComplaintResponse { + session: self.id.clone().into(), + secret1: secret1_sent.into(), + secret2: secret2_sent.into(), + }))); } // someone is complaining against someone else => let's remember this @@ -434,15 +478,15 @@ impl Session { if is_critical_complaints_num { // too many complaints => exclude from session - Session::disqualify_node(&message.against, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&message.against, &*self.cluster, &mut *data); } Ok(()) } /// When complaint response is received - pub fn on_complaint_response(&self, sender: NodeId, message: ComplaintResponse) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint_response(&self, sender: NodeId, message: &ComplaintResponse) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -471,11 +515,11 @@ impl Session { }; if 
!is_key_verification_ok { - Session::disqualify_node(&sender, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&sender, &*self.cluster, &mut *data); } else { let node_data = data.nodes.get_mut(&sender).expect("cluster guarantees to deliver messages from qualified nodes only; qed"); - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); node_data.complaints.remove(self.node()); } @@ -510,19 +554,24 @@ impl Session { self_node.public_share = Some(self_public_share.clone()); // broadcast self public key share - self.cluster.broadcast(Message::PublicKeyShare(PublicKeyShare { - session: self.id.clone(), - public_share: self_public_share, - })) + self.cluster.broadcast(Message::Encryption(EncryptionMessage::PublicKeyShare(PublicKeyShare { + session: self.id.clone().into(), + public_share: self_public_share.into(), + }))) } /// When public key share is received. 
- pub fn on_public_key_share(&self, sender: NodeId, message: PublicKeyShare) -> Result<(), Error> { + pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { let mut data = self.data.lock(); // check state if data.state != SessionState::WaitingForPublicKeyShare { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete | + SessionState::WaitingForKeysDissemination | + SessionState::KeyCheck => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } // update node data with received public share @@ -532,7 +581,7 @@ impl Session { return Err(Error::InvalidMessage); } - node_data.public_share = Some(message.public_share); + node_data.public_share = Some(message.public_share.clone().into()); } // if there's also nodes, which has not sent us their public shares - do nothing @@ -540,16 +589,149 @@ impl Session { return Ok(()); } - // else - calculate joint public key && finish session - data.joint_public = { + // else - calculate joint public key + let joint_public = { let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - Some(math::compute_joint_public(public_shares)?) + math::compute_joint_public(public_shares)? 
}; - data.state = SessionState::Finished; + + // if we are at the slave node - wait for session completion + if data.master.as_ref() != Some(self.node()) { + data.joint_public = Some(Ok(joint_public)); + data.state = SessionState::WaitingForEncryptionConfirmation; + return Ok(()); + } + + // then generate secret point + // then encrypt secret point with joint public key + let secret_point = math::generate_random_point()?; + let encrypted_secret_point = math::encrypt_secret(&secret_point, &joint_public)?; + + // then save encrypted data to the key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: encrypted_secret_point.common_point, + encrypted_point: encrypted_secret_point.encrypted_point, + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then distribute encrypted data to every other node + self.cluster.broadcast(Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + })))?; + + // then wait for confirmation from all other nodes + { + let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); + self_node.completion_confirmed = true; + } + data.joint_public = Some(Ok(joint_public)); + data.secret_point = Some(Ok(secret_point)); + data.state = SessionState::WaitingForEncryptionConfirmation; Ok(()) } + /// When session completion message is received. 
+ pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); + + // check state + if data.state != SessionState::WaitingForEncryptionConfirmation { + match data.state { + SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + + // if we are not masters, save result and respond with confirmation + if data.master.as_ref() != Some(self.node()) { + // check that we have received message from master + if data.master.as_ref() != Some(&sender) { + return Err(Error::InvalidMessage); + } + + // save encrypted data to key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: message.common_point.clone().into(), + encrypted_point: message.encrypted_point.clone().into(), + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then respond with confirmation + data.state = SessionState::Finished; + return self.cluster.send(&sender, Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + }))); + } + + // remember that we have received confirmation from sender node + { + let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by 
himself; qed"); + if sender_node.completion_confirmed { + return Err(Error::InvalidMessage); + } + + sender_node.completion_confirmed = true; + } + + // check if we have received confirmations from all cluster nodes + if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { + return Ok(()) + } + + // we have received enough confirmations => complete session + data.state = SessionState::Finished; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: NodeId, message: &SessionError) { + warn!("{}: encryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io(message.error.clone()))); + data.secret_point = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occured. + pub fn on_session_timeout(&self, node: &NodeId) { + warn!("{}: encryption session timeout", self.node()); + let mut data = self.data.lock(); + + match data.state { + SessionState::WaitingForInitialization | + SessionState::WaitingForInitializationConfirm(_) | + SessionState::WaitingForInitializationComplete => (), + _ => if !data.nodes.contains_key(node) { + return; + }, + } + + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io("session expired".into()))); + data.secret_point = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + /// Keys dissemination (KD) phase fn disseminate_keys(&self) -> Result<(), Error> { let mut data = self.data.lock(); @@ -576,12 +758,12 @@ impl Session { node_data.secret1_sent = Some(secret1.clone()); node_data.secret2_sent = Some(secret2.clone()); - self.cluster.send(&node, Message::KeysDissemination(KeysDissemination { - session: self.id.clone(), - secret1: secret1, - secret2: secret2, - publics: publics.clone(), - }))?; + 
self.cluster.send(&node, Message::Encryption(EncryptionMessage::KeysDissemination(KeysDissemination { + session: self.id.clone().into(), + secret1: secret1.into(), + secret2: secret2.into(), + publics: publics.iter().cloned().map(Into::into).collect(), + })))?; } else { node_data.secret1 = Some(secret1); node_data.secret2 = Some(secret2); @@ -599,7 +781,7 @@ impl Session { fn disqualify_node(node: &NodeId, cluster: &Cluster, data: &mut SessionData) { let threshold = data.threshold .expect("threshold is filled on initialization phase; node can only be disqualified during KC phase; KC phase follows initialization phase; qed"); - + // blacklist node cluster.blacklist(&node); // too many complaints => exclude from session @@ -612,6 +794,25 @@ impl Session { } } +impl Session for SessionImpl { + #[cfg(test)] + fn joint_public_key(&self) -> Option { + self.data.lock().joint_public.clone().and_then(|r| r.ok()) + } + + + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.secret_point.is_some() { + self.completed.wait(&mut data); + } + + data.secret_point.as_ref() + .expect("checked above or waited for completed; completed is only signaled when secret_point.is_some(); qed") + .clone() + } +} + impl EveryOtherNodeVisitor { pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { EveryOtherNodeVisitor { @@ -648,11 +849,12 @@ impl NodeData { secret2: None, publics: None, public_share: None, + completion_confirmed: false, } } } -impl Debug for Session { +impl Debug for SessionImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { write!(f, "Encryption session {} on {}", self.id, self.self_node_id) } @@ -682,26 +884,29 @@ pub fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), #[cfg(test)] mod tests { + use std::time; use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap}; + use std::collections::{BTreeSet, BTreeMap, VecDeque}; + use tokio_core::reactor::Core; use ethkey::{Random, Generator}; - use 
key_server_cluster::{NodeId, SessionId, Error}; - use key_server_cluster::message::{self, Message}; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::encryption_session::{Session, SessionState}; + use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage}; + use key_server_cluster::message::{self, Message, EncryptionMessage}; + use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established}; + use key_server_cluster::encryption_session::{Session, SessionImpl, SessionState, SessionParams}; use key_server_cluster::math; use key_server_cluster::math::tests::do_encryption_and_decryption; #[derive(Debug)] struct Node { pub cluster: Arc, - pub session: Session, + pub session: SessionImpl, } #[derive(Debug)] struct MessageLoop { pub session_id: SessionId, pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, } impl MessageLoop { @@ -712,7 +917,12 @@ mod tests { let key_pair = Random.generate().unwrap(); let node_id = key_pair.public().clone(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(session_id.clone(), node_id.clone(), cluster.clone()); + let session = SessionImpl::new(SessionParams { + id: session_id.clone(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster.clone(), + }); nodes.insert(node_id, Node { cluster: cluster, session: session }); } @@ -726,22 +936,23 @@ mod tests { MessageLoop { session_id: session_id, nodes: nodes, + queue: VecDeque::new(), } } - pub fn master(&self) -> &Session { + pub fn master(&self) -> &SessionImpl { &self.nodes.values().nth(0).unwrap().session } - pub fn first_slave(&self) -> &Session { + pub fn first_slave(&self) -> &SessionImpl { &self.nodes.values().nth(1).unwrap().session } - pub fn second_slave(&self) -> &Session { + pub fn second_slave(&self) -> &SessionImpl { &self.nodes.values().nth(2).unwrap().session } - pub 
fn third_slave(&self) -> &Session { + pub fn third_slave(&self) -> &SessionImpl { &self.nodes.values().nth(3).unwrap().session } @@ -749,18 +960,29 @@ mod tests { self.nodes.values() .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) .nth(0) + .or_else(|| self.queue.pop_front()) } pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match msg.2 { - Message::InitializeSession(message) => self.nodes[&msg.1].session.on_initialize_session(msg.0, message), - Message::ConfirmInitialization(message) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0, message), - Message::CompleteInitialization(message) => self.nodes[&msg.1].session.on_complete_initialization(msg.0, message), - Message::KeysDissemination(message) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0, message), - Message::Complaint(message) => self.nodes[&msg.1].session.on_complaint(msg.0, message), - Message::ComplaintResponse(message) => self.nodes[&msg.1].session.on_complaint_response(msg.0, message), - Message::PublicKeyShare(message) => self.nodes[&msg.1].session.on_public_key_share(msg.0, message), - _ => panic!("unexpected"), + match { + match msg.2 { + Message::Encryption(EncryptionMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::Complaint(ref message)) => self.nodes[&msg.1].session.on_complaint(msg.0.clone(), &message), + 
Message::Encryption(EncryptionMessage::ComplaintResponse(ref message)) => self.nodes[&msg.1].session.on_complaint_response(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), + _ => panic!("unexpected"), + } + } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), } } @@ -796,7 +1018,12 @@ mod tests { fn fails_to_initialize_if_not_a_part_of_cluster() { let node_id = math::generate_random_point().unwrap(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(SessionId::default(), node_id.clone(), cluster); + let session = SessionImpl::new(SessionParams { + id: SessionId::default(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster, + }); let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); assert_eq!(session.initialize(0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -816,9 +1043,9 @@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (sid, m, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.first_slave().on_initialize_session(m, message::InitializeSession { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -826,16 +1053,16 @@ mod tests { fn slave_updates_derived_point_on_initialization() { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); 
let passed_point = match l.take_message().unwrap() { - (f, t, Message::InitializeSession(message)) => { + (f, t, Message::Encryption(EncryptionMessage::InitializeSession(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::InitializeSession(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::InitializeSession(message)))).unwrap(); point }, _ => panic!("unexpected"), }; match l.take_message().unwrap() { - (_, _, Message::ConfirmInitialization(message)) => assert!(passed_point != message.derived_point), + (_, _, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), _ => panic!("unexpected"), } } @@ -846,9 +1073,9 @@ mod tests { l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -857,9 +1084,9 @@ mod tests { let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -868,15 +1095,15 @@ mod tests { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); let passed_point = match 
l.take_message().unwrap() { - (f, t, Message::ConfirmInitialization(message)) => { + (f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::ConfirmInitialization(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message)))).unwrap(); point }, _ => panic!("unexpected"), }; - assert!(passed_point != l.master().derived_point().unwrap()); + assert!(l.master().derived_point().unwrap() != passed_point.into()); } #[test] @@ -884,11 +1111,11 @@ mod tests { let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); let mut nodes = BTreeMap::new(); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesCount); } @@ -898,11 +1125,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -912,11 
+1139,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 2, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidThreshold); } @@ -926,11 +1153,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -945,22 +1172,22 @@ mod tests { nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); nodes.insert(l.second_slave().node().clone(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: 
math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().on_keys_dissemination(s, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -974,11 +1201,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidMessage); } @@ -992,11 +1219,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization 
l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1004,12 +1231,12 @@ mod tests { fn defends_if_receives_complain_on_himself() { let (sid, m, s, mut l) = make_simple_cluster(1, 3).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(s, message::Complaint { - session: sid, - against: m, + l.master().on_complaint(s, &message::Complaint { + session: sid.into(), + against: m.into(), }).unwrap(); match l.take_message().unwrap() { - (_, _, Message::ComplaintResponse(_)) => (), + (_, _, Message::Encryption(EncryptionMessage::ComplaintResponse(_))) => (), _ => panic!("unexpected"), } } @@ -1018,13 +1245,13 @@ mod tests { fn node_is_disqualified_if_enough_complaints_received() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.third_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.third_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ 
-1033,13 +1260,13 @@ mod tests { fn node_is_not_disqualified_if_enough_complaints_received_from_the_same_node() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 4); } @@ -1058,17 +1285,17 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let s2 = l.second_slave().node().clone(); - l.master().on_keys_dissemination(s2.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + l.master().on_keys_dissemination(s2.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(s2, message::ComplaintResponse { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), + l.master().on_complaint_response(s2, &message::ComplaintResponse { + session: sid.into(), + secret1: 
math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 2); } @@ -1087,22 +1314,22 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let (f, t, msg) = match l.take_message() { - Some((f, t, Message::KeysDissemination(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::KeysDissemination(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.second_slave().node()); assert_eq!(&t, l.master().node()); - l.master().on_keys_dissemination(f.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: msg.publics.clone(), + l.master().on_keys_dissemination(f.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: msg.publics.clone().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(f, message::ComplaintResponse { - session: sid, - secret1: msg.secret1, - secret2: msg.secret2, + l.master().on_complaint_response(f, &message::ComplaintResponse { + session: sid.into(), + secret1: msg.secret1.into(), + secret2: msg.secret2.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ -1116,9 +1343,9 @@ mod tests { #[test] fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); - assert_eq!(l.master().on_public_key_share(s, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { + session: sid.into(), + public_share: 
math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1129,24 +1356,21 @@ mod tests { l.master().start_key_generation_phase().unwrap(); l.first_slave().start_key_generation_phase().unwrap(); let (f, t, msg) = match l.take_message() { - Some((f, t, Message::PublicKeyShare(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.master().node()); assert_eq!(&t, l.first_slave().node()); - l.process_message((f, t, Message::PublicKeyShare(msg.clone()))).unwrap(); - assert_eq!(l.first_slave().on_public_key_share(m, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + l.process_message((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg.clone())))).unwrap(); + assert_eq!(l.first_slave().on_public_key_share(m, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn complete_enc_dec_session() { - // TODO: when number of nodes, needed to decrypt message is odd, algorithm won't work - // let test_cases = [/*(0, 2), */(1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), - // (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; - let test_cases = [(3, 5)]; + let test_cases = [(0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { let mut l = MessageLoop::new(num_nodes); l.master().initialize(threshold, l.nodes.keys().cloned().collect()).unwrap(); @@ -1194,4 +1418,26 @@ mod tests { } // TODO: add test where some nodes are disqualified from session + + #[test] + fn encryption_session_works_over_network() { + //::util::log::init_log(); + + let test_cases = [(1, 3)]; + for &(threshold, num_nodes) in &test_cases { + let mut core = Core::new().unwrap(); + + // prepare cluster objects for each 
node + let clusters = make_clusters(&core, 6020, num_nodes); + run_clusters(&clusters); + + // establish connections + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + + // run session to completion + let session_id = SessionId::default(); + let session = clusters[0].client().new_encryption_session(session_id, threshold).unwrap(); + loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_key().is_some()); + } + } } diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs new file mode 100644 index 000000000..7b8c4d0ed --- /dev/null +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -0,0 +1,85 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::time::Duration; +use futures::{Future, Select, BoxFuture, Poll, Async}; +use tokio_core::reactor::{Handle, Timeout}; + +type DeadlineBox where F: Future = BoxFuture, F::Error>; + +/// Complete a passed future or fail if it is not completed within timeout. 
+pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> + where F: Future + Send + 'static, T: 'static { + let timeout = try!(Timeout::new(duration, handle)).map(|_| DeadlineStatus::Timeout).boxed(); + let future = future.map(DeadlineStatus::Meet).boxed(); + let deadline = Deadline { + future: timeout.select(future), + }; + Ok(deadline) +} + +#[derive(Debug, PartialEq)] +/// Deadline future completion status. +pub enum DeadlineStatus { + /// Completed a future. + Meet(T), + /// Faled with timeout. + Timeout, +} + +/// Future, which waits for passed future completion within given period, or fails with timeout. +pub struct Deadline where F: Future { + future: Select, DeadlineBox>, +} + +impl Future for Deadline where F: Future { + type Item = DeadlineStatus; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + match self.future.poll() { + Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err((err, _other)) => Err(err), + } + } +} + +#[cfg(test)] +mod tests { + use std::io; + use std::time::Duration; + use futures::{Future, empty, done}; + use tokio_core::reactor::Core; + use super::{deadline, DeadlineStatus}; + + //#[test] TODO: not working + fn _deadline_timeout_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1), &core.handle(), empty::<(), io::Error>()).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Timeout); + } + + #[test] + fn deadline_result_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); + } +} \ No newline at end of file diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs new file 
mode 100644 index 000000000..0d71d25aa --- /dev/null +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -0,0 +1,320 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::collections::BTreeSet; +use futures::{Future, Poll, Async}; +use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public}; +use util::H256; +use key_server_cluster::{NodeId, Error}; +use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; +use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, + read_message, compute_shared_key}; + +/// Start handshake procedure with another node from the cluster. +pub fn handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) +} + +/// Start handshake procedure with another node from the cluster and given plain confirmation. 
+pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let (error, state) = match self_confirmation_plain.clone() + .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { + Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: true, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +/// Wait for handshake procedure to be started by another node from the cluster. +pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + let (error, state) = match self_confirmation_plain.clone() { + Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: false, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +#[derive(Debug, PartialEq)] +/// Result of handshake procedure. +pub struct HandshakeResult { + /// Node id. + pub node_id: NodeId, + /// Shared key. + pub shared_key: Secret, +} + +/// Future handshake procedure. 
+pub struct Handshake { + is_active: bool, + error: Option<(A, Result)>, + state: HandshakeState, + self_key_pair: KeyPair, + self_confirmation_plain: H256, + trusted_nodes: BTreeSet, + other_node_id: Option, + other_confirmation_plain: Option, + shared_key: Option, +} + +/// Active handshake state. +enum HandshakeState { + SendPublicKey(WriteMessage), + ReceivePublicKey(ReadMessage), + SendPrivateKeySignature(WriteMessage), + ReceivePrivateKeySignature(ReadMessage), + Finished, +} + +impl Handshake where A: io::Read + io::Write { + #[cfg(test)] + pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) { + self.self_confirmation_plain = self_confirmation_plain; + } + + pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: self_node_id.into(), + confirmation_plain: confirmation_plain.into(), + }))) + } + + fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: sign(secret, confirmation_plain)?.into(), + }))) + } +} + +impl Future for Handshake where A: io::Read + io::Write { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(error_result) = self.error.take() { + return Ok(error_result.into()); + } + + let (next, result) = match self.state { + HandshakeState::SendPublicKey(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + if self.is_active { + (HandshakeState::ReceivePublicKey( + read_message(stream) + ), Async::NotReady) + } else { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") + ) { + Ok(shared_key) => Some(shared_key), + 
Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } + }, + HandshakeState::ReceivePublicKey(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePublicKey(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + if !self.trusted_nodes.contains(&*message.node_id) { + return Ok((stream, Err(Error::InvalidNodeId)).into()); + } + + self.other_node_id = Some(message.node_id.into()); + self.other_confirmation_plain = Some(message.confirmation_plain.into()); + if self.is_active { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } else { + let 
message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone()) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady) + } + }, + HandshakeState::SendPrivateKeySignature(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + (HandshakeState::ReceivePrivateKeySignature( + read_message(stream) + ), Async::NotReady) + }, + HandshakeState::ReceivePrivateKeySignature(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + let other_node_public = self.other_node_id.as_ref().expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); + if !verify_public(other_node_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) { + return Ok((stream, Err(Error::InvalidMessage)).into()); + } + + (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { + node_id: self.other_node_id.expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), + shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"), + })))) + }, + HandshakeState::Finished => panic!("poll Handshake after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + use futures::Future; + use ethcrypto::ecdh::agree; 
+ use ethkey::{Random, Generator, sign}; + use util::H256; + use key_server_cluster::io::message::tests::TestIo; + use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; + use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; + + fn prepare_test_io() -> (H256, TestIo) { + let self_key_pair = Random.generate().unwrap(); + let peer_key_pair = Random.generate().unwrap(); + let mut io = TestIo::new(self_key_pair.clone(), peer_key_pair.public().clone()); + + let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); + let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); + + let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap(); + let peer_confirmation_signed = sign(self_key_pair.secret(), &peer_confirmation_plain).unwrap(); + + io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: peer_key_pair.public().clone().into(), + confirmation_plain: peer_confirmation_plain.into(), + }))); + io.add_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: self_confirmation_signed.into(), + }))); + + io.add_output_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: self_key_pair.public().clone().into(), + confirmation_plain: self_confirmation_plain.clone().into(), + }))); + io.add_output_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: peer_confirmation_signed.into(), + }))); + + (self_confirmation_plain, io) + } + + #[test] + fn active_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), 
trusted_nodes.iter().nth(0).unwrap()).unwrap(); + + let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes); + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } + + #[test] + fn passive_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), io.peer_public()).unwrap(); + + let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes); + handshake.set_self_confirmation_plain(self_confirmation_plain); + + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } +} diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs new file mode 100644 index 000000000..bcabebf76 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -0,0 +1,247 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io::Cursor; +use std::u16; +use std::ops::Deref; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use serde_json; +use ethcrypto::ecdh::agree; +use ethkey::{Public, Secret}; +use key_server_cluster::Error; +use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; + +/// Size of serialized header. +pub const MESSAGE_HEADER_SIZE: usize = 4; + +#[derive(Debug, PartialEq)] +/// Message header. +pub struct MessageHeader { + /// Message/Header version. + pub version: u8, + /// Message kind. + pub kind: u8, + /// Message payload size (without header). + pub size: u16, +} + +#[derive(Debug, Clone, PartialEq)] +/// Serialized message. +pub struct SerializedMessage(Vec); + +impl Deref for SerializedMessage { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.0 + } +} + +impl Into> for SerializedMessage { + fn into(self) -> Vec { + self.0 + } +} + +/// Serialize message. 
+pub fn serialize_message(message: Message) -> Result { + let (message_kind, payload) = match message { + Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)), + + Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::Complaint(payload)) => (54, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ComplaintResponse(payload)) => (55, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (56, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionError(payload)) => (57, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (58, serde_json::to_vec(&payload)), + + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)), + 
Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)), + }; + + let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; + let payload_len = payload.len(); + if payload_len > u16::MAX as usize { + return Err(Error::InvalidMessage); + } + + let header = MessageHeader { + kind: message_kind, + version: 1, + size: payload_len as u16, + }; + + let mut serialized_message = serialize_header(&header)?; + serialized_message.extend(payload); + Ok(SerializedMessage(serialized_message)) +} + +/// Deserialize message. +pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result { + Ok(match header.kind { + 1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 54 => Message::Encryption(EncryptionMessage::Complaint(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 55 => 
Message::Encryption(EncryptionMessage::ComplaintResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 56 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 57 => Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 58 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 104 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), + }) +} + +/// Encrypt serialized message. +pub fn encrypt_message(_key: &Secret, message: SerializedMessage) -> Result { + Ok(message) // TODO: implement me +} + +/// Decrypt serialized message. +pub fn decrypt_message(_key: &Secret, payload: Vec) -> Result, Error> { + Ok(payload) // TODO: implement me +} + +/// Compute shared encryption key. +pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result { + Ok(agree(self_secret, other_public)?) +} + +/// Serialize message header. 
+fn serialize_header(header: &MessageHeader) -> Result, Error> { + let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); + buffer.write_u8(header.version)?; + buffer.write_u8(header.kind)?; + buffer.write_u16::(header.size)?; + Ok(buffer) +} + +/// Deserialize message header. +pub fn deserialize_header(data: &[u8]) -> Result { + let mut reader = Cursor::new(data); + Ok(MessageHeader { + version: reader.read_u8()?, + kind: reader.read_u8()?, + size: reader.read_u16::()?, + }) +} + +#[cfg(test)] +pub mod tests { + use std::io; + use ethkey::{KeyPair, Public}; + use key_server_cluster::message::Message; + use super::{MESSAGE_HEADER_SIZE, MessageHeader, serialize_message, serialize_header, deserialize_header}; + + pub struct TestIo { + self_key_pair: KeyPair, + peer_public: Public, + input_buffer: io::Cursor>, + output_buffer: Vec, + expected_output_buffer: Vec, + } + + impl TestIo { + pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { + TestIo { + self_key_pair: self_key_pair, + peer_public: peer_public, + input_buffer: io::Cursor::new(Vec::new()), + output_buffer: Vec::new(), + expected_output_buffer: Vec::new(), + } + } + + pub fn self_key_pair(&self) -> &KeyPair { + &self.self_key_pair + } + + pub fn peer_public(&self) -> &Public { + &self.peer_public + } + + pub fn add_input_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + let input_buffer = self.input_buffer.get_mut(); + for b in serialized_message { + input_buffer.push(b); + } + } + + pub fn add_output_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + self.expected_output_buffer.extend(serialized_message); + } + + pub fn assert_output(&self) { + assert_eq!(self.output_buffer, self.expected_output_buffer); + } + } + + impl io::Read for TestIo { + fn read(&mut 
self, buf: &mut [u8]) -> io::Result { + io::Read::read(&mut self.input_buffer, buf) + } + } + + impl io::Write for TestIo { + fn write(&mut self, buf: &[u8]) -> io::Result { + io::Write::write(&mut self.output_buffer, buf) + } + + fn flush(&mut self) -> io::Result<()> { + io::Write::flush(&mut self.output_buffer) + } + } + + #[test] + fn header_serialization_works() { + let header = MessageHeader { + kind: 1, + version: 2, + size: 3, + }; + + let serialized_header = serialize_header(&header).unwrap(); + assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE); + + let deserialized_header = deserialize_header(&serialized_header).unwrap(); + assert_eq!(deserialized_header, header); + } +} diff --git a/secret_store/src/key_server_cluster/io/mod.rs b/secret_store/src/key_server_cluster/io/mod.rs new file mode 100644 index 000000000..57071038e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/mod.rs @@ -0,0 +1,34 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +mod deadline; +mod handshake; +mod message; +mod read_header; +mod read_payload; +mod read_message; +mod shared_tcp_stream; +mod write_message; + +pub use self::deadline::{deadline, Deadline, DeadlineStatus}; +pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; +pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, + encrypt_message, compute_shared_key}; +pub use self::read_header::{read_header, ReadHeader}; +pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; +pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; +pub use self::shared_tcp_stream::SharedTcpStream; +pub use self::write_message::{write_message, write_encrypted_message, WriteMessage}; diff --git a/secret_store/src/key_server_cluster/io/read_header.rs b/secret_store/src/key_server_cluster/io/read_header.rs new file mode 100644 index 000000000..ab7ce360e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_header.rs @@ -0,0 +1,44 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::io; +use futures::{Future, Poll, Async}; +use tokio_core::io::{ReadExact, read_exact}; +use key_server_cluster::Error; +use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header}; + +/// Create future for read single message header from the stream. +pub fn read_header(a: A) -> ReadHeader where A: io::Read { + ReadHeader { + reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]), + } +} + +/// Future for read single message header from the stream. +pub struct ReadHeader { + reader: ReadExact, +} + +impl Future for ReadHeader where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let header = deserialize_header(&data); + Ok(Async::Ready((read, header))) + } +} diff --git a/secret_store/src/key_server_cluster/io/read_message.rs b/secret_store/src/key_server_cluster/io/read_message.rs new file mode 100644 index 000000000..418e5e31d --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_message.rs @@ -0,0 +1,86 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::io; +use futures::{Poll, Future, Async}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload}; + +/// Create future for read single message from the stream. +pub fn read_message(a: A) -> ReadMessage where A: io::Read { + ReadMessage { + key: None, + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +/// Create future for read single encrypted message from the stream. +pub fn read_encrypted_message(a: A, key: Secret) -> ReadMessage where A: io::Read { + ReadMessage { + key: Some(key), + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +enum ReadMessageState { + ReadHeader(ReadHeader), + ReadPayload(ReadPayload), + Finished, +} + +/// Future for read single message from the stream. +pub struct ReadMessage { + key: Option, + state: ReadMessageState, +} + +impl Future for ReadMessage where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ReadMessageState::ReadHeader(ref mut future) => { + let (read, header) = try_ready!(future.poll()); + let header = match header { + Ok(header) => header, + Err(err) => return Ok((read, Err(err)).into()), + }; + + let future = match self.key.take() { + Some(key) => read_encrypted_payload(read, header, key), + None => read_payload(read, header), + }; + let next = ReadMessageState::ReadPayload(future); + (next, Async::NotReady) + }, + ReadMessageState::ReadPayload(ref mut future) => { + let (read, payload) = try_ready!(future.poll()); + (ReadMessageState::Finished, Async::Ready((read, payload))) + }, + ReadMessageState::Finished => panic!("poll ReadMessage after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git 
a/secret_store/src/key_server_cluster/io/read_payload.rs b/secret_store/src/key_server_cluster/io/read_payload.rs new file mode 100644 index 000000000..f6df3155e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_payload.rs @@ -0,0 +1,64 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Poll, Future}; +use tokio_core::io::{read_exact, ReadExact}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message}; + +/// Create future for read single message payload from the stream. +pub fn read_payload(a: A, header: MessageHeader) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: None, + } +} + +/// Create future for read single encrypted message payload from the stream. +pub fn read_encrypted_payload(a: A, header: MessageHeader, key: Secret) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: Some(key), + } +} + +/// Future for read single message payload from the stream. 
+pub struct ReadPayload { + reader: ReadExact>, + header: MessageHeader, + key: Option, +} + +impl Future for ReadPayload where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let payload = if let Some(key) = self.key.take() { + decrypt_message(&key, data) + .and_then(|data| deserialize_message(&self.header, data)) + } else { + deserialize_message(&self.header, data) + }; + Ok((read, payload).into()) + } +} diff --git a/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs new file mode 100644 index 000000000..82933c8a2 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs @@ -0,0 +1,60 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::io::{Read, Write, Error}; +use tokio_core::net::TcpStream; + +/// Read+Write implementation for Arc. 
+pub struct SharedTcpStream { + io: Arc, +} + +impl SharedTcpStream { + pub fn new(a: Arc) -> Self { + SharedTcpStream { + io: a, + } + } +} + +impl From for SharedTcpStream { + fn from(a: TcpStream) -> Self { + SharedTcpStream::new(Arc::new(a)) + } +} + +impl Read for SharedTcpStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + Read::read(&mut (&*self.io as &TcpStream), buf) + } +} + +impl Write for SharedTcpStream { + fn write(&mut self, buf: &[u8]) -> Result { + Write::write(&mut (&*self.io as &TcpStream), buf) + } + + fn flush(&mut self) -> Result<(), Error> { + Write::flush(&mut (&*self.io as &TcpStream)) + } +} + +impl Clone for SharedTcpStream { + fn clone(&self) -> Self { + SharedTcpStream::new(self.io.clone()) + } +} diff --git a/secret_store/src/key_server_cluster/io/write_message.rs b/secret_store/src/key_server_cluster/io/write_message.rs new file mode 100644 index 000000000..457673676 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/write_message.rs @@ -0,0 +1,70 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Future, Poll}; +use tokio_core::io::{WriteAll, write_all}; +use ethkey::Secret; +use key_server_cluster::message::Message; +use key_server_cluster::io::{serialize_message, encrypt_message}; + +/// Write plain message to the channel. 
+pub fn write_message(a: A, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + WriteMessage { + error: error, + future: future, + } +} + +/// Write encrypted message to the channel. +pub fn write_encrypted_message(a: A, key: &Secret, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .and_then(|message| encrypt_message(key, message)) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + + + WriteMessage { + error: error, + future: future, + } +} + +/// Future message write. +pub struct WriteMessage { + error: Option, + future: WriteAll>, +} + +impl Future for WriteMessage where A: io::Write { + type Item = (A, Vec); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(err) = self.error.take() { + return Err(err); + } + + self.future.poll() + } +} diff --git a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index 4da17ebc7..fdda08746 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ -160,7 +160,7 @@ pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result Result { +pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result { // this is performed by KS-cluster client (or KS master) let key_pair = Random.generate()?; @@ -171,7 +171,7 @@ pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result Result(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result where I: Iterator { - let other_node_number = 
other_nodes_numbers.next().expect("compute_node_shadow is called when at least two nodes are required to decrypt secret; qed"); + let other_node_number = match other_nodes_numbers.next() { + Some(other_node_number) => other_node_number, + None => return Ok(node_secret_share.clone()), + }; + let mut shadow = node_number.clone(); shadow.sub(other_node_number)?; shadow.inv()?; @@ -231,17 +235,24 @@ pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: } /// Decrypt data using joint shadow point. -pub fn decrypt_with_joint_shadow(access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { +pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { let mut inv_access_key = access_key.clone(); inv_access_key.inv()?; - - let mut decrypted_point = joint_shadow_point.clone(); - math::public_mul_secret(&mut decrypted_point, &inv_access_key)?; - math::public_add(&mut decrypted_point, encrypted_point)?; + + let mut mul = joint_shadow_point.clone(); + math::public_mul_secret(&mut mul, &inv_access_key)?; + + let mut decrypted_point = encrypted_point.clone(); + if threshold % 2 != 0 { + math::public_add(&mut decrypted_point, &mul)?; + } else { + math::public_sub(&mut decrypted_point, &mul)?; + } Ok(decrypted_point) } +#[cfg(test)] /// Decrypt data using joint secret (version for tests). 
pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result { let mut common_point_mul = common_point.clone(); @@ -262,7 +273,7 @@ pub mod tests { // === PART2: encryption using joint public key === // the next line is executed on KeyServer-client - let encrypted_secret = encrypt_secret(document_secret_plain.clone(), &joint_public).unwrap(); + let encrypted_secret = encrypt_secret(&document_secret_plain, &joint_public).unwrap(); // === PART3: decryption === @@ -285,7 +296,7 @@ pub mod tests { assert_eq!(joint_shadow_point, joint_shadow_point_test); // decrypt encrypted secret using joint shadow point - let document_secret_decrypted = decrypt_with_joint_shadow(&access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); + let document_secret_decrypted = decrypt_with_joint_shadow(t, &access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); // decrypt encrypted secret using joint secret [just for test] let document_secret_decrypted_test = match joint_secret { @@ -298,7 +309,8 @@ pub mod tests { #[test] fn full_encryption_math_session() { - let test_cases = [(1, 3)]; + let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), + (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; for &(t, n) in &test_cases { // === PART1: DKG === diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index 800dcf705..9958884a4 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -14,13 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::fmt; use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Secret, Signature}; -use key_server_cluster::{NodeId, SessionId}; +use ethkey::Secret; +use key_server_cluster::SessionId; +use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature}; + +pub type MessageSessionId = SerializableH256; +pub type MessageNodeId = SerializablePublic; #[derive(Clone, Debug)] -/// All possible messages that can be sent during DKG. +/// All possible messages that can be sent during encryption/decryption sessions. pub enum Message { + /// Cluster message. + Cluster(ClusterMessage), + /// Encryption message. + Encryption(EncryptionMessage), + /// Decryption message. + Decryption(DecryptionMessage), +} + +#[derive(Clone, Debug)] +/// All possible cluster-level messages. +pub enum ClusterMessage { + /// Introduce node public key. + NodePublicKey(NodePublicKey), + /// Confirm that node owns its private key. + NodePrivateKeySignature(NodePrivateKeySignature), + /// Keep alive message. + KeepAlive(KeepAlive), + /// Keep alive message response. + KeepAliveResponse(KeepAliveResponse), +} + +#[derive(Clone, Debug)] +/// All possible messages that can be sent during encryption session. +pub enum EncryptionMessage { /// Initialize new DKG session. InitializeSession(InitializeSession), /// Confirm DKG session initialization. @@ -35,7 +64,15 @@ pub enum Message { ComplaintResponse(ComplaintResponse), /// Broadcast self public key portion. PublicKeyShare(PublicKeyShare), + /// When session error has occured. + SessionError(SessionError), + /// When session is completed. + SessionCompleted(SessionCompleted), +} +#[derive(Clone, Debug)] +/// All possible messages that can be sent during decryption session. +pub enum DecryptionMessage { /// Initialize decryption session. InitializeDecryptionSession(InitializeDecryptionSession), /// Confirm/reject decryption session initialization. 
@@ -44,125 +81,272 @@ pub enum Message { RequestPartialDecryption(RequestPartialDecryption), /// Partial decryption is completed PartialDecryption(PartialDecryption), + /// When decryption session error has occured. + DecryptionSessionError(DecryptionSessionError), } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Introduce node public key. +pub struct NodePublicKey { + /// Node identifier (aka node public key). + pub node_id: MessageNodeId, + /// Data, which must be signed by peer to prove that he owns the corresponding private key. + pub confirmation_plain: SerializableH256, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that node owns the private key of previously passed public key (aka node id). +pub struct NodePrivateKeySignature { + /// Previously passed `confirmation_plain`, signed with node private key. + pub confirmation_signed: SerializableSignature, +} + + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Ask if the node is still alive. +pub struct KeepAlive { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that the node is still alive. +pub struct KeepAliveResponse { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Initialize new DKG session. pub struct InitializeSession { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. Starting from originator, every node must multiply this /// point by random scalar (unknown by other nodes). At the end of initialization /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` /// is unknown for every node. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm DKG session initialization. pub struct ConfirmInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. 
- pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Broadcast generated point to every other node. pub struct CompleteInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// All session participants along with their identification numbers. - pub nodes: BTreeMap, + pub nodes: BTreeMap, /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to /// consensus to successfully decrypt message. pub threshold: usize, /// Derived generation point. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Generated keys are sent to every node. pub struct KeysDissemination { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, /// Public values. - pub publics: Vec, + pub publics: Vec, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Complaint against node is broadcasted. pub struct Complaint { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Public values. - pub against: NodeId, + pub against: MessageNodeId, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to complaint. pub struct ComplaintResponse { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is sharing its public key share. pub struct PublicKeyShare { /// Session Id. 
- pub session: SessionId, + pub session: MessageSessionId, /// Public key share. - pub public_share: Public, + pub public_share: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session error has occured. +pub struct SessionError { + /// Session Id. + pub session: MessageSessionId, + /// Public key share. + pub error: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session is completed. +pub struct SessionCompleted { + /// Session Id. + pub session: MessageSessionId, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to decrypt data, encrypted in given session. pub struct InitializeDecryptionSession { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Requestor signature. - pub requestor_signature: Signature, + pub requestor_signature: SerializableSignature, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to decryption request. pub struct ConfirmDecryptionInitialization { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Is node confirmed to make a decryption?. pub is_confirmed: bool, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to do a partial decryption. pub struct RequestPartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Nodes that are agreed to do a decryption. 
- pub nodes: BTreeSet, + pub nodes: BTreeSet, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node has partially decrypted the secret. pub struct PartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Partially decrypted secret. - pub shadow_point: Public, + pub shadow_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When decryption session error has occured. +pub struct DecryptionSessionError { + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Public key share. + pub error: String, +} + +impl EncryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + EncryptionMessage::InitializeSession(ref msg) => &msg.session, + EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session, + EncryptionMessage::CompleteInitialization(ref msg) => &msg.session, + EncryptionMessage::KeysDissemination(ref msg) => &msg.session, + EncryptionMessage::Complaint(ref msg) => &msg.session, + EncryptionMessage::ComplaintResponse(ref msg) => &msg.session, + EncryptionMessage::PublicKeyShare(ref msg) => &msg.session, + EncryptionMessage::SessionError(ref msg) => &msg.session, + EncryptionMessage::SessionCompleted(ref msg) => &msg.session, + } + } +} + +impl DecryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, + } + } + + pub fn sub_session_id(&self) -> &Secret { + match *self { + 
DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, + } + } +} + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Message::Cluster(ref message) => write!(f, "Cluster.{}", message), + Message::Encryption(ref message) => write!(f, "Encryption.{}", message), + Message::Decryption(ref message) => write!(f, "Decryption.{}", message), + } + } +} + +impl fmt::Display for ClusterMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"), + ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"), + ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"), + ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"), + } + } +} + +impl fmt::Display for EncryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"), + EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), + EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), + EncryptionMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), + EncryptionMessage::Complaint(_) => write!(f, "Complaint"), + EncryptionMessage::ComplaintResponse(_) => write!(f, "ComplaintResponse"), + EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), + EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), + EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + } + } +} + +impl fmt::Display for 
DecryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"), + DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"), + DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), + DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), + DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), + } + } } diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 5d0dacd11..8b33e06f7 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -14,21 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#![allow(dead_code)] // TODO: remove me - -use std::collections::BTreeMap; -use ethkey::{self, Public, Secret, Signature}; +use std::fmt; +use std::io::Error as IoError; +use ethkey; +use ethcrypto; use super::types::all::DocumentAddress; -pub use super::acl_storage::AclStorage; +pub use super::types::all::{NodeId, EncryptionConfiguration}; +pub use super::acl_storage::{AclStorage, DummyAclStorage}; +pub use super::key_storage::{KeyStorage, DocumentKeyShare}; +pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic}; +pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; +pub use self::encryption_session::Session as EncryptionSession; +pub use self::decryption_session::Session as DecryptionSession; + +#[cfg(test)] +pub use super::key_storage::tests::DummyKeyStorage; -pub type NodeId = Public; pub type SessionId = DocumentAddress; -pub type SessionIdSignature = Signature; -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Errors which can occur during encryption/decryption 
session pub enum Error { + /// Invalid node address has been passed. + InvalidNodeAddress, + /// Invalid node id has been passed. + InvalidNodeId, + /// Session with the given id already exists. + DuplicateSessionId, + /// Session with the given id is unknown. + InvalidSessionId, /// Invalid number of nodes. /// There must be at least two nodes participating in encryption. /// There must be at least one node participating in decryption. @@ -39,28 +54,24 @@ pub enum Error { /// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption. InvalidThreshold, /// Current state of encryption/decryption session does not allow to proceed request. + /// Reschedule this request for later processing. + TooEarlyForRequest, + /// Current state of encryption/decryption session does not allow to proceed request. /// This means that either there is some comm-failure or node is misbehaving/cheating. InvalidStateForRequest, - /// Some data in passed message was recognized as invalid. + /// Message or some data in the message was recognized as invalid. /// This means that node is misbehaving/cheating. InvalidMessage, + /// Connection to node, required for this session is not established. + NodeDisconnected, /// Cryptographic error. EthKey(String), -} - -#[derive(Debug, Clone)] -/// Data, which is stored on every node after DKG && encryption is completed. -pub struct EncryptedData { - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - threshold: usize, - /// Nodes ids numbers. - id_numbers: BTreeMap, - /// Node secret share. - secret_share: Secret, - /// Common (shared) encryption point. - common_point: Public, - /// Encrypted point. - encrypted_point: Public, + /// I/O error has occured. + Io(String), + /// Deserialization error has occured. + Serde(String), + /// Key storage error. 
+ KeyStorage(String), } impl From for Error { @@ -69,8 +80,50 @@ impl From for Error { } } +impl From for Error { + fn from(err: ethcrypto::Error) -> Self { + Error::EthKey(err.into()) + } +} + +impl From for Error { + fn from(err: IoError) -> Self { + Error::Io(err.to_string()) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), + Error::InvalidNodeId => write!(f, "invalid node id has been passed"), + Error::DuplicateSessionId => write!(f, "session with the same id is already registered"), + Error::InvalidSessionId => write!(f, "invalid session id has been passed"), + Error::InvalidNodesCount => write!(f, "invalid nodes count"), + Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"), + Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"), + Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"), + Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), + Error::InvalidMessage => write!(f, "invalid message is received"), + Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), + Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), + Error::Io(ref e) => write!(f, "i/o error {}", e), + Error::Serde(ref e) => write!(f, "serde error {}", e), + Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), + } + } +} + +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + mod cluster; mod decryption_session; mod encryption_session; +mod io; mod math; mod message; +mod net; diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs new file mode 100644 index 000000000..0daa8b2da --- /dev/null +++ 
b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -0,0 +1,63 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::net::SocketAddr; +use std::time::Duration; +use std::collections::BTreeSet; +use futures::{Future, Poll}; +use tokio_core::reactor::Handle; +use tokio_core::net::TcpStream; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for accepting incoming connection. +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let accept = AcceptConnection { + handshake: accept_handshake(stream, self_key_pair, trusted_nodes), + address: address, + }; + + deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout") +} + +/// Future for accepting incoming connection. 
+pub struct AcceptConnection { + handshake: Handshake, + address: SocketAddr, +} + +impl Future for AcceptConnection { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (stream, result) = try_ready!(self.handshake.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Err(err).into()), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + Ok(Ok(connection).into()) + } +} diff --git a/secret_store/src/key_server_cluster/net/connect.rs b/secret_store/src/key_server_cluster/net/connect.rs new file mode 100644 index 000000000..449168ab2 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connect.rs @@ -0,0 +1,90 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::BTreeSet; +use std::io; +use std::time::Duration; +use std::net::SocketAddr; +use futures::{Future, Poll, Async}; +use tokio_core::reactor::Handle; +use tokio_core::net::{TcpStream, TcpStreamNew}; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for connecting to other node. 
+pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let connect = Connect { + state: ConnectState::TcpConnect(TcpStream::connect(address, handle)), + address: address.clone(), + self_key_pair: self_key_pair, + trusted_nodes: trusted_nodes, + }; + + deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout") +} + +enum ConnectState { + TcpConnect(TcpStreamNew), + Handshake(Handshake), + Connected, +} + +/// Future for connecting to other node. +pub struct Connect { + state: ConnectState, + address: SocketAddr, + self_key_pair: KeyPair, + trusted_nodes: BTreeSet, +} + +impl Future for Connect { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ConnectState::TcpConnect(ref mut future) => { + let stream = try_ready!(future.poll()); + let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone()); + (ConnectState::Handshake(handshake), Async::NotReady) + }, + ConnectState::Handshake(ref mut future) => { + let (stream, result) = try_ready!(future.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Async::Ready(Err(err))), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + (ConnectState::Connected, Async::Ready(Ok(connection))) + }, + ConnectState::Connected => panic!("poll Connect after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git a/secret_store/src/key_server_cluster/net/connection.rs b/secret_store/src/key_server_cluster/net/connection.rs new file mode 100644 index 000000000..8125b81d3 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connection.rs @@ -0,0 +1,32 @@ +// Copyright 2015-2017 Parity 
Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::net; +use ethkey::Secret; +use key_server_cluster::NodeId; +use key_server_cluster::io::SharedTcpStream; + +/// Established connection data +pub struct Connection { + /// Peer address. + pub address: net::SocketAddr, + /// Connection stream. + pub stream: SharedTcpStream, + /// Peer node id. + pub node_id: NodeId, + /// Encryption key. + pub key: Secret, +} diff --git a/secret_store/src/key_server_cluster/net/mod.rs b/secret_store/src/key_server_cluster/net/mod.rs new file mode 100644 index 000000000..6abf83ceb --- /dev/null +++ b/secret_store/src/key_server_cluster/net/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +mod accept_connection; +mod connect; +mod connection; + +pub use self::accept_connection::{AcceptConnection, accept_connection}; +pub use self::connect::{Connect, connect}; +pub use self::connection::Connection; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index fe7777410..e3106f221 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -15,15 +15,34 @@ // along with Parity. If not, see . use std::path::PathBuf; +use std::collections::BTreeMap; +use serde_json; +use ethkey::{Secret, Public}; use util::Database; -use types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; +use types::all::{Error, ServiceConfiguration, DocumentAddress, NodeId}; +use serialization::{SerializablePublic, SerializableSecret}; + +#[derive(Debug, Clone, PartialEq)] +/// Encrypted key share, stored by key storage on the single key server. +pub struct DocumentKeyShare { + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: Secret, + /// Common (shared) encryption point. + pub common_point: Public, + /// Encrypted point. + pub encrypted_point: Public, +} /// Document encryption keys storage pub trait KeyStorage: Send + Sync { /// Insert document encryption key - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error>; + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>; /// Get document encryption key - fn get(&self, document: &DocumentAddress) -> Result; + fn get(&self, document: &DocumentAddress) -> Result; } /// Persistent document encryption keys storage @@ -31,6 +50,21 @@ pub struct PersistentKeyStorage { db: Database, } +#[derive(Serialize, Deserialize)] +/// Encrypted key share, as it is stored by key storage on the single key server. 
+struct SerializableDocumentKeyShare { + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, +} + impl PersistentKeyStorage { /// Create new persistent document encryption keys storage pub fn new(config: &ServiceConfiguration) -> Result { @@ -45,41 +79,71 @@ impl PersistentKeyStorage { } impl KeyStorage for PersistentKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { + let key: SerializableDocumentKeyShare = key.into(); + let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; let mut batch = self.db.transaction(); batch.put(None, &document, &key); self.db.write(batch).map_err(Error::Database) } - fn get(&self, document: &DocumentAddress) -> Result { + fn get(&self, document: &DocumentAddress) -> Result { self.db.get(None, document) .map_err(Error::Database)? 
.ok_or(Error::DocumentNotFound) .map(|key| key.to_vec()) + .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) + .map(Into::into) + } +} + +impl From for SerializableDocumentKeyShare { + fn from(key: DocumentKeyShare) -> Self { + SerializableDocumentKeyShare { + threshold: key.threshold, + id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + secret_share: key.secret_share.into(), + common_point: key.common_point.into(), + encrypted_point: key.encrypted_point.into(), + } + } +} + +impl From for DocumentKeyShare { + fn from(key: SerializableDocumentKeyShare) -> Self { + DocumentKeyShare { + threshold: key.threshold, + id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + secret_share: key.secret_share.into(), + common_point: key.common_point.into(), + encrypted_point: key.encrypted_point.into(), + } } } #[cfg(test)] pub mod tests { - use std::collections::HashMap; + use std::collections::{BTreeMap, HashMap}; use parking_lot::RwLock; use devtools::RandomTempPath; - use super::super::types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; - use super::{KeyStorage, PersistentKeyStorage}; + use ethkey::{Random, Generator}; + use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, + DocumentAddress, EncryptionConfiguration}; + use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare}; #[derive(Default)] /// In-memory document encryption keys storage pub struct DummyKeyStorage { - keys: RwLock>, + keys: RwLock>, } impl KeyStorage for DummyKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { self.keys.write().insert(document, key); Ok(()) } - fn get(&self, document: &DocumentAddress) -> Result { + fn get(&self, document: &DocumentAddress) -> Result { 
self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound) } } @@ -88,15 +152,46 @@ pub mod tests { fn persistent_key_storage() { let path = RandomTempPath::create_dir(); let config = ServiceConfiguration { - listener_addr: "0.0.0.0".to_owned(), - listener_port: 8082, + listener_address: NodeAddress { + address: "0.0.0.0".to_owned(), + port: 8082, + }, data_path: path.as_str().to_owned(), + cluster_config: ClusterConfiguration { + threads: 1, + self_private: (**Random.generate().unwrap().secret().clone()).into(), + listener_address: NodeAddress { + address: "0.0.0.0".to_owned(), + port: 8083, + }, + nodes: BTreeMap::new(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + }, }; let key1 = DocumentAddress::from(1); - let value1: DocumentKey = vec![0x77, 0x88]; + let value1 = DocumentKeyShare { + threshold: 100, + id_numbers: vec![ + (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) + ].into_iter().collect(), + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Random.generate().unwrap().public().clone(), + encrypted_point: Random.generate().unwrap().public().clone(), + }; let key2 = DocumentAddress::from(2); - let value2: DocumentKey = vec![0x11, 0x22]; + let value2 = DocumentKeyShare { + threshold: 200, + id_numbers: vec![ + (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) + ].into_iter().collect(), + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Random.generate().unwrap().public().clone(), + encrypted_point: Random.generate().unwrap().public().clone(), + }; let key3 = DocumentAddress::from(3); let key_storage = PersistentKeyStorage::new(&config).unwrap(); diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 41d658963..bbb8474d4 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -14,10 +14,22 @@ // You should 
have received a copy of the GNU General Public License // along with Parity. If not, see . +extern crate byteorder; #[macro_use] extern crate log; +#[macro_use] +extern crate futures; +extern crate futures_cpupool; extern crate hyper; extern crate parking_lot; +extern crate rustc_serialize; +extern crate serde; +extern crate serde_json; +#[macro_use] +extern crate serde_derive; +extern crate tokio_core; +extern crate tokio_service; +extern crate tokio_proto; extern crate url; extern crate ethcore_devtools as devtools; @@ -38,16 +50,19 @@ mod acl_storage; mod http_listener; mod key_server; mod key_storage; +mod serialization; pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public, - Error, ServiceConfiguration}; + Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, EncryptionConfiguration}; pub use traits::{KeyServer}; /// Start new key server instance pub fn start(config: ServiceConfiguration) -> Result, Error> { - let acl_storage = acl_storage::DummyAclStorage::default(); - let key_storage = key_storage::PersistentKeyStorage::new(&config)?; - let key_server = key_server::KeyServerImpl::new(acl_storage, key_storage); + use std::sync::Arc; + + let acl_storage = Arc::new(acl_storage::DummyAclStorage::default()); + let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(config, key_server)?; Ok(Box::new(listener)) } diff --git a/secret_store/src/serialization.rs b/secret_store/src/serialization.rs new file mode 100644 index 000000000..0d0e904a7 --- /dev/null +++ b/secret_store/src/serialization.rs @@ -0,0 +1,260 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::fmt; +use std::cmp::{Ord, PartialOrd, Ordering}; +use std::ops::Deref; +use rustc_serialize::hex::ToHex; +use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::de::{Visitor, Error as SerdeError}; +use ethkey::{Public, Secret, Signature}; +use util::H256; + +#[derive(Clone, Debug)] +/// Serializable Signature. +pub struct SerializableSignature(Signature); + +impl From for SerializableSignature where Signature: From { + fn from(s: T) -> SerializableSignature { + SerializableSignature(s.into()) + } +} + +impl Into for SerializableSignature { + fn into(self) -> Signature { + self.0 + } +} + +impl Deref for SerializableSignature { + type Target = Signature; + + fn deref(&self) -> &Signature { + &self.0 + } +} + +impl Serialize for SerializableSignature { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableSignature { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableSignature; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded Signature") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| 
SerializableSignature(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable H256. +pub struct SerializableH256(H256); + +impl From for SerializableH256 where H256: From { + fn from(s: T) -> SerializableH256 { + SerializableH256(s.into()) + } +} + +impl Into for SerializableH256 { + fn into(self) -> H256 { + self.0 + } +} + +impl Deref for SerializableH256 { + type Target = H256; + + fn deref(&self) -> &H256 { + &self.0 + } +} + +impl Serialize for SerializableH256 { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableH256 { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableH256; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded H256") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializableH256(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable EC scalar/secret key. 
+pub struct SerializableSecret(Secret); + +impl From for SerializableSecret where Secret: From { + fn from(s: T) -> SerializableSecret { + SerializableSecret(s.into()) + } +} + +impl Into for SerializableSecret { + fn into(self) -> Secret { + self.0 + } +} + +impl Deref for SerializableSecret { + type Target = Secret; + + fn deref(&self) -> &Secret { + &self.0 + } +} + +impl Serialize for SerializableSecret { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableSecret { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableSecret; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded EC scalar") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializableSecret(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable EC point/public key. 
+pub struct SerializablePublic(Public); + +impl From for SerializablePublic where Public: From { + fn from(p: T) -> SerializablePublic { + SerializablePublic(p.into()) + } +} + +impl Into for SerializablePublic { + fn into(self) -> Public { + self.0 + } +} + +impl Deref for SerializablePublic { + type Target = Public; + + fn deref(&self) -> &Public { + &self.0 + } +} + +impl Eq for SerializablePublic { } + +impl PartialEq for SerializablePublic { + fn eq(&self, other: &SerializablePublic) -> bool { + self.0.eq(&other.0) + } +} + +impl Ord for SerializablePublic { + fn cmp(&self, other: &SerializablePublic) -> Ordering { + self.0.cmp(&other.0) + } +} + +impl PartialOrd for SerializablePublic { + fn partial_cmp(&self, other: &SerializablePublic) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Serialize for SerializablePublic { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializablePublic { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializablePublic; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded EC point") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializablePublic(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 9a68e9c4d..1a407e5c7 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -19,6 +19,8 @@ use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey} #[ipc(client_ident="RemoteKeyServer")] /// Secret store key server pub trait KeyServer: Send + Sync { + /// Generate 
encryption key for given document. + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result; /// Request encryption key of given document for given requestor fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result; } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index f318e6543..514b4eb6b 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -15,10 +15,14 @@ // along with Parity. If not, see . use std::fmt; +use std::collections::BTreeMap; use ethkey; use util; +use key_server_cluster; +/// Node id. +pub type NodeId = ethkey::Public; /// Document address type. pub type DocumentAddress = util::H256; /// Document key type. @@ -46,16 +50,53 @@ pub enum Error { Internal(String), } +#[derive(Debug)] +#[binary] +/// Secret store configuration +pub struct NodeAddress { + /// IP address. + pub address: String, + /// IP port. + pub port: u16, +} + #[derive(Debug)] #[binary] /// Secret store configuration pub struct ServiceConfiguration { - /// Interface to listen to - pub listener_addr: String, - /// Port to listen to - pub listener_port: u16, + /// HTTP listener address. + pub listener_address: NodeAddress, /// Data directory path for secret store pub data_path: String, + /// Cluster configuration. + pub cluster_config: ClusterConfiguration, +} + +#[derive(Debug)] +#[binary] +/// Key server cluster configuration +pub struct ClusterConfiguration { + /// Number of threads reserved by cluster. + pub threads: usize, + /// Private key this node holds. + pub self_private: Vec, // holds ethkey::Secret + /// This node address. + pub listener_address: NodeAddress, + /// All cluster nodes addresses. + pub nodes: BTreeMap, + /// Allow outbound connections to 'higher' nodes. + /// This is useful for tests, but slower a bit for production. + pub allow_connecting_to_higher_nodes: bool, + /// Encryption session configuration. 
+ pub encryption_config: EncryptionConfiguration, +} + +#[derive(Clone, Debug)] +#[binary] +/// Encryption parameters. +pub struct EncryptionConfiguration { + /// Key check timeout. + pub key_check_timeout_ms: u64, } impl fmt::Display for Error { @@ -70,6 +111,18 @@ impl fmt::Display for Error { } } +impl From for Error { + fn from(err: ethkey::Error) -> Self { + Error::Internal(err.into()) + } +} + +impl From for Error { + fn from(err: key_server_cluster::Error) -> Self { + Error::Internal(err.into()) + } +} + impl Into for Error { fn into(self) -> String { format!("{}", self) From 2447875b269ac47ffe14b653d4a42b6da0fc89e5 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Mon, 3 Apr 2017 12:16:41 +0200 Subject: [PATCH 79/91] Update npm build for new inclusions (#5381) * Update npm build for new inclusions * Add dummy interface for secp256k1 --- js/src/api/local/ethkey/dummy.js | 19 +++++++++++++++++++ js/webpack/npm.js | 13 +++++++++---- 2 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 js/src/api/local/ethkey/dummy.js diff --git a/js/src/api/local/ethkey/dummy.js b/js/src/api/local/ethkey/dummy.js new file mode 100644 index 000000000..38f7c84de --- /dev/null +++ b/js/src/api/local/ethkey/dummy.js @@ -0,0 +1,19 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +export default function () { + // empty file included while building parity.js (don't include local keygen) +} diff --git a/js/webpack/npm.js b/js/webpack/npm.js index b1f41d805..b526b2f0f 100644 --- a/js/webpack/npm.js +++ b/js/webpack/npm.js @@ -24,9 +24,11 @@ const ENV = process.env.NODE_ENV || 'development'; const isProd = ENV === 'production'; const LIBRARY = process.env.LIBRARY; + if (!LIBRARY) { process.exit(-1); } + const SRC = LIBRARY.toLowerCase(); const OUTPUT_PATH = path.join(__dirname, '../.npmjs', SRC); @@ -63,12 +65,18 @@ module.exports = { 'babel-loader?cacheDirectory=true' ], exclude: /node_modules/ + }, + { + test: /\.js$/, + include: /node_modules\/(ethereumjs-tx|@parity\/wordlist)/, + use: 'babel-loader' } ] }, resolve: { alias: { + 'secp256k1/js': path.resolve(__dirname, '../src/api/local/ethkey/dummy.js'), '~': path.resolve(__dirname, '../src') }, modules: [ @@ -85,15 +93,12 @@ module.exports = { to: 'package.json', transform: function (content, path) { const json = JSON.parse(content.toString()); - json.version = packageJson.version; - // Add tests dependencies to Dev Deps json.devDependencies.chai = packageJson.devDependencies.chai; json.devDependencies.mocha = packageJson.devDependencies.mocha; json.devDependencies.nock = packageJson.devDependencies.nock; - - // Add test script json.scripts.test = 'mocha \'test/*.spec.js\''; + json.version = packageJson.version; return new Buffer(JSON.stringify(json, null, ' '), 'utf-8'); } From 0a90f235de18d381a938a91a6cc76fab25bd88e5 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Mon, 3 Apr 2017 10:39:53 +0000 Subject: [PATCH 80/91] [ci skip] js-precompiled 20170403-103604 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aecf8960f..36d76cc01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1752,7 +1752,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = 
"git+https://github.com/paritytech/js-precompiled.git#6028c355854797a5938c26f5d2b2faf10d8833d7" +source = "git+https://github.com/paritytech/js-precompiled.git#6867dd71b2064f32ff53c84663897da6b4b13927" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 62803cc70..d2d5ec0ca 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.43", + "version": "1.7.44", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 0f65779d29814e0275d49036a2855562f92b89a0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Apr 2017 12:54:27 +0200 Subject: [PATCH 81/91] fix indentation in usage.txt --- parity/cli/usage.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index f970887f7..1e5f3c0fb 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -72,9 +72,9 @@ Operating Options: --identity NAME Specify your node's name. (default: {flag_identity}) --light Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary - data on-demand from the network. Much lower in storage, - potentially higher in bandwidth. Has no effect with - subcommands (default: {flag_light}) + data on-demand from the network. Much lower in storage, + potentially higher in bandwidth. Has no effect with + subcommands (default: {flag_light}). Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. 
From 848779e465655268da4640fbcf6dae5378a16977 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Mon, 3 Apr 2017 12:56:07 +0200 Subject: [PATCH 82/91] Allow empty-encoded values from encoding (#5385) --- js/src/util/qrscan.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/js/src/util/qrscan.js b/js/src/util/qrscan.js index eabc95409..f3cf2f9e9 100644 --- a/js/src/util/qrscan.js +++ b/js/src/util/qrscan.js @@ -92,9 +92,9 @@ export function generateQr (from, tx, hash, rlp) { account: from.substr(2), hash: hash.substr(2), details: { - gasPrice: inNumber10(inHex(tx.gasPrice.toString('hex'))), - gas: inNumber10(inHex(tx.gasLimit.toString('hex'))), - nonce: inNumber10(inHex(tx.nonce.toString('hex'))), + gasPrice: inNumber10(inHex(tx.gasPrice.toString('hex') || '0')), + gas: inNumber10(inHex(tx.gasLimit.toString('hex') || '0')), + nonce: inNumber10(inHex(tx.nonce.toString('hex') || '0')), to: inAddress(tx.to.toString('hex')), value: inHex(tx.value.toString('hex') || '0') } From ee4f9da3859d3f4dc7620ebd5d0b2092ede066e9 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Mon, 3 Apr 2017 11:24:51 +0000 Subject: [PATCH 83/91] [ci skip] js-precompiled 20170403-112007 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36d76cc01..409dbc418 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1752,7 +1752,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#6867dd71b2064f32ff53c84663897da6b4b13927" +source = "git+https://github.com/paritytech/js-precompiled.git#04143247380a7a9bce112c9467636684d8214973" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index d2d5ec0ca..82807e820 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.44", 
+ "version": "1.7.45", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From abec06f50c95dc4382a5988096ff0135b607ee23 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 3 Apr 2017 18:46:51 +0300 Subject: [PATCH 84/91] On-chain ACL checker for secretstore (#5015) * ECDKG protocol prototype * added test for enc/dec math * get rid of decryption_session * added licenses * fix after merge * get rid of unused serde dependency * doc * decryption session [without commutative enc] * failed_dec_session * fixed tests * added commen * added more decryption session tests * helper to localize an issue * more computations to localize error * decryption_session::SessionParams * added tests for EC math to localize problem * secretstore network transport * encryption_session_works_over_network * network errors processing * connecting to KeyServer * licenses * get rid of debug println-s * fixed secretstore args * encryption results are stored in KS database * decryption protocol works over network * enc/dec Session traits * fixing warnings * fix after merge * on-chain ACL checker proto * fixed compilation * fixed compilation * finally fixed -of-N-scheme * temporary commented test * 1-of-N works in math * scheme 1-of-N works * updated AclStorage with real contract ABI * remove unnecessary unsafety * fixed grumbles * wakeup on access denied * fix after merge * fix after merge * moved contract to native-contracts lib --- Cargo.lock | 3 + ethcore/native_contracts/build.rs | 2 + ethcore/native_contracts/src/lib.rs | 2 + .../src/secretstore_acl_storage.rs | 22 +++++ parity/run.rs | 4 +- parity/secretstore.rs | 10 +- secret_store/Cargo.toml | 3 + secret_store/src/acl_storage.rs | 92 +++++++++++++++---- secret_store/src/key_server.rs | 2 +- .../key_server_cluster/decryption_session.rs | 10 +- secret_store/src/key_server_cluster/mod.rs | 7 +- secret_store/src/lib.rs | 10 +- secret_store/src/types/all.rs | 5 +- 13 files changed, 140 insertions(+), 
32 deletions(-) create mode 100644 ethcore/native_contracts/src/secretstore_acl_storage.rs diff --git a/Cargo.lock b/Cargo.lock index 409dbc418..f36aa132e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -635,6 +635,8 @@ name = "ethcore-secretstore" version = "1.0.0" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore 1.7.0", "ethcore-devtools 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -646,6 +648,7 @@ dependencies = [ "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "native-contracts 0.1.0", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index a8488617a..91eaa86cd 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -23,6 +23,7 @@ use std::io::Write; // TODO: `include!` these from files where they're pretty-printed? 
const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}]
,"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"boo
l"}],"payable":false,"type":"function"}]"#; const SERVICE_TRANSACTION_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#; +const SECRETSTORE_ACL_STORAGE_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}]"#; fn build_file(name: &str, abi: &str, filename: &str) { let code = ::native_contract_generator::generate_module(name, abi).unwrap(); @@ -37,4 +38,5 @@ fn build_file(name: 
&str, abi: &str, filename: &str) { fn main() { build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); + build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs"); } diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index 55c6446b7..e894a636f 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -25,6 +25,8 @@ extern crate ethcore_util as util; mod registry; mod service_transaction; +mod secretstore_acl_storage; pub use self::registry::Registry; pub use self::service_transaction::ServiceTransactionChecker; +pub use self::secretstore_acl_storage::SecretStoreAclStorage; diff --git a/ethcore/native_contracts/src/secretstore_acl_storage.rs b/ethcore/native_contracts/src/secretstore_acl_storage.rs new file mode 100644 index 000000000..3ebfcfb75 --- /dev/null +++ b/ethcore/native_contracts/src/secretstore_acl_storage.rs @@ -0,0 +1,22 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Secret store ACL storage contract. +// TODO: testing. 
+ +include!(concat!(env!("OUT_DIR"), "/secretstore_acl_storage.rs")); diff --git a/parity/run.rs b/parity/run.rs index a85bcc39b..8da5130d4 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -463,7 +463,9 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R let signer_server = signer::start(cmd.signer_conf.clone(), signer_deps)?; // secret store key server - let secretstore_deps = secretstore::Dependencies { }; + let secretstore_deps = secretstore::Dependencies { + client: client.clone(), + }; let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps); // the ipfs server diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 13d6d28d2..d31614193 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::sync::Arc; use dir::default_data_path; +use ethcore::client::Client; use helpers::replace_home; #[derive(Debug, PartialEq, Clone)] @@ -30,10 +32,10 @@ pub struct Configuration { pub data_path: String, } -#[derive(Debug, PartialEq, Clone)] /// Secret store dependencies pub struct Dependencies { - // the only dependency will be BlockChainClient + /// Blockchain client. 
+ pub client: Arc, } #[cfg(not(feature = "secretstore"))] @@ -64,7 +66,7 @@ mod server { impl KeyServer { /// Create new key server - pub fn new(conf: Configuration, _deps: Dependencies) -> Result { + pub fn new(conf: Configuration, deps: Dependencies) -> Result { let key_pairs = vec![ ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(), ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(), @@ -96,7 +98,7 @@ mod server { } }; - let key_server = ethcore_secretstore::start(conf) + let key_server = ethcore_secretstore::start(deps.client, conf) .map_err(Into::::into)?; Ok(KeyServer { diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index fba76804b..539f15f1f 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -24,12 +24,15 @@ tokio-core = "0.1" tokio-service = "0.1" tokio-proto = "0.1" url = "1.0" +ethabi = "1.0.0" +ethcore = { path = "../ethcore" } ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc-nano = { path = "../ipc/nano" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } +native-contracts = { path = "../ethcore/native_contracts" } [profile.release] debug = true diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 47ec3d44a..fea45c920 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -14,38 +14,92 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::collections::{HashMap, HashSet}; -use parking_lot::RwLock; +use std::sync::Arc; +use futures::{future, Future}; +use parking_lot::Mutex; +use ethkey::public_to_address; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use native_contracts::SecretStoreAclStorage; use types::all::{Error, DocumentAddress, Public}; +const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; + /// ACL storage of Secret Store pub trait AclStorage: Send + Sync { /// Check if requestor with `public` key can access document with hash `document` fn check(&self, public: &Public, document: &DocumentAddress) -> Result; } -/// Dummy ACL storage implementation -#[derive(Default, Debug)] -pub struct DummyAclStorage { - prohibited: RwLock>>, +/// On-chain ACL storage implementation. +pub struct OnChainAclStorage { + /// Blockchain client. + client: Arc, + /// On-chain contract. + contract: Mutex>, } -impl DummyAclStorage { - #[cfg(test)] - /// Prohibit given requestor access to given document - pub fn prohibit(&self, public: Public, document: DocumentAddress) { - self.prohibited.write() - .entry(public) - .or_insert_with(Default::default) - .insert(document); +impl OnChainAclStorage { + pub fn new(client: Arc) -> Self { + OnChainAclStorage { + client: client, + contract: Mutex::new(None), + } } } -impl AclStorage for DummyAclStorage { +impl AclStorage for OnChainAclStorage { fn check(&self, public: &Public, document: &DocumentAddress) -> Result { - Ok(self.prohibited.read() - .get(public) - .map(|docs| !docs.contains(document)) - .unwrap_or(true)) + let mut contract = self.contract.lock(); + if !contract.is_some() { + *contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) + .and_then(|contract_addr| { + trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr); + + Some(SecretStoreAclStorage::new(contract_addr)) + }) + } + if let Some(ref contract) = *contract { + let address = 
public_to_address(&public); + let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + } else { + Err(Error::Internal("ACL checker contract is not configured".to_owned())) + } + } +} + +#[cfg(test)] +pub mod tests { + use std::collections::{HashMap, HashSet}; + use parking_lot::RwLock; + use types::all::{Error, DocumentAddress, Public}; + use super::AclStorage; + + #[derive(Default, Debug)] + /// Dummy ACL storage implementation + pub struct DummyAclStorage { + prohibited: RwLock>>, + } + + impl DummyAclStorage { + #[cfg(test)] + /// Prohibit given requestor access to given document + pub fn prohibit(&self, public: Public, document: DocumentAddress) { + self.prohibited.write() + .entry(public) + .or_insert_with(Default::default) + .insert(document); + } + } + + impl AclStorage for DummyAclStorage { + fn check(&self, public: &Public, document: &DocumentAddress) -> Result { + Ok(self.prohibited.read() + .get(public) + .map(|docs| !docs.contains(document)) + .unwrap_or(true)) + } } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 553b49bfe..598f06338 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -147,7 +147,7 @@ mod tests { use std::sync::Arc; use ethcrypto; use ethkey::{self, Random, Generator}; - use acl_storage::DummyAclStorage; + use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey}; use super::super::{RequestSignature, DocumentAddress}; diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index 71d8ad26f..652ed5c5a 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ 
b/secret_store/src/key_server_cluster/decryption_session.rs @@ -220,7 +220,7 @@ impl SessionImpl { self.completed.notify_all(); }, // we can not decrypt data - SessionState::Failed => (), + SessionState::Failed => self.completed.notify_all(), // cannot reach other states _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm state above; qed"), } @@ -285,7 +285,10 @@ impl SessionImpl { SessionState::WaitingForPartialDecryption => SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data), // we can not have enough nodes for decryption - SessionState::Failed => Ok(()), + SessionState::Failed => { + self.completed.notify_all(); + Ok(()) + }, // cannot reach other states _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm state above; qed"), } @@ -480,6 +483,7 @@ fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut // check if we still can receive enough confirmations to do a decryption? 
if encrypted_data.id_numbers.len() - data.rejected_nodes.len() < encrypted_data.threshold + 1 { + data.decrypted_secret = Some(Err(Error::AccessDenied)); data.state = SessionState::Failed; } }, @@ -503,7 +507,7 @@ fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants mod tests { use std::sync::Arc; use std::collections::BTreeMap; - use super::super::super::acl_storage::DummyAclStorage; + use super::super::super::acl_storage::tests::DummyAclStorage; use ethkey::{self, Random, Generator, Public, Secret}; use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error}; use key_server_cluster::cluster::tests::DummyCluster; diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 8b33e06f7..e889ef322 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -21,7 +21,7 @@ use ethcrypto; use super::types::all::DocumentAddress; pub use super::types::all::{NodeId, EncryptionConfiguration}; -pub use super::acl_storage::{AclStorage, DummyAclStorage}; +pub use super::acl_storage::AclStorage; pub use super::key_storage::{KeyStorage, DocumentKeyShare}; pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic}; pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; @@ -30,6 +30,8 @@ pub use self::decryption_session::Session as DecryptionSession; #[cfg(test)] pub use super::key_storage::tests::DummyKeyStorage; +#[cfg(test)] +pub use super::acl_storage::tests::DummyAclStorage; pub type SessionId = DocumentAddress; @@ -72,6 +74,8 @@ pub enum Error { Serde(String), /// Key storage error. KeyStorage(String), + /// Acl storage error. 
+ AccessDenied, } impl From for Error { @@ -110,6 +114,7 @@ impl fmt::Display for Error { Error::Io(ref e) => write!(f, "i/o error {}", e), Error::Serde(ref e) => write!(f, "serde error {}", e), Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), + Error::AccessDenied => write!(f, "Access denied"), } } } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index bbb8474d4..7de957991 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -32,11 +32,14 @@ extern crate tokio_service; extern crate tokio_proto; extern crate url; +extern crate ethabi; +extern crate ethcore; extern crate ethcore_devtools as devtools; extern crate ethcore_util as util; extern crate ethcore_ipc as ipc; extern crate ethcrypto; extern crate ethkey; +extern crate native_contracts; mod key_server_cluster; mod types; @@ -52,15 +55,18 @@ mod key_server; mod key_storage; mod serialization; +use std::sync::Arc; +use ethcore::client::Client; + pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public, Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, EncryptionConfiguration}; pub use traits::{KeyServer}; /// Start new key server instance -pub fn start(config: ServiceConfiguration) -> Result, Error> { +pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { use std::sync::Arc; - let acl_storage = Arc::new(acl_storage::DummyAclStorage::default()); + let acl_storage = Arc::new(acl_storage::OnChainAclStorage::new(client)); let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(config, key_server)?; diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 514b4eb6b..23e07e994 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -119,7 +119,10 @@ impl From for 
Error { impl From for Error { fn from(err: key_server_cluster::Error) -> Self { - Error::Internal(err.into()) + match err { + key_server_cluster::Error::AccessDenied => Error::AccessDenied, + _ => Error::Internal(err.into()), + } } } From 50e0221dd147b64e0c7ac7e50c1c5611ce1a3b2a Mon Sep 17 00:00:00 2001 From: maciejhirsz Date: Mon, 3 Apr 2017 18:50:11 +0200 Subject: [PATCH 85/91] Perf and fixes --- js/package.json | 2 +- js/src/api/local/accounts/account.js | 40 +++--- js/src/api/local/accounts/accounts.js | 58 +++++--- js/src/api/local/ethkey/index.js | 39 +++--- js/src/api/local/ethkey/worker.js | 129 ++++++++++++------ js/src/api/local/ethkey/workerPool.js | 61 +++++++++ js/src/api/local/middleware.js | 81 +++++++---- js/src/api/transport/jsonRpcBase.js | 12 +- .../CreateAccount/NewAccount/newAccount.js | 13 +- .../NewAccount/newAccount.spec.js | 9 +- js/src/modals/CreateAccount/store.js | 6 +- js/src/modals/CreateAccount/store.spec.js | 3 + 12 files changed, 324 insertions(+), 129 deletions(-) create mode 100644 js/src/api/local/ethkey/workerPool.js diff --git a/js/package.json b/js/package.json index 82807e820..0585a1681 100644 --- a/js/package.json +++ b/js/package.json @@ -176,7 +176,7 @@ "geopattern": "1.2.3", "isomorphic-fetch": "2.2.1", "js-sha3": "0.5.5", - "keythereum": "0.4.3", + "keythereum": "0.4.6", "lodash": "4.17.2", "loglevel": "1.4.1", "marked": "0.3.6", diff --git a/js/src/api/local/accounts/account.js b/js/src/api/local/accounts/account.js index da9de1359..94e923f45 100644 --- a/js/src/api/local/accounts/account.js +++ b/js/src/api/local/accounts/account.js @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-import { keythereum } from '../ethkey'; +import { createKeyObject, decryptPrivateKey } from '../ethkey'; export default class Account { constructor (persist, data) { @@ -31,12 +31,14 @@ export default class Account { } isValidPassword (password) { - try { - keythereum.recover(Buffer.from(password), this._keyObject); - return true; - } catch (e) { - return false; - } + return decryptPrivateKey(this._keyObject, password) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + return true; + }); } get address () { @@ -68,21 +70,23 @@ export default class Account { } decryptPrivateKey (password) { - return keythereum.recover(Buffer.from(password), this._keyObject); + return decryptPrivateKey(this._keyObject, password); + } + + changePassword (key, password) { + return createKeyObject(key, password).then((keyObject) => { + this._keyObject = keyObject; + + this._persist(); + }); } static fromPrivateKey (persist, key, password) { - const iv = keythereum.crypto.randomBytes(16); - const salt = keythereum.crypto.randomBytes(32); + return createKeyObject(key, password).then((keyObject) => { + const account = new Account(persist, { keyObject }); - // Keythereum will fail if `password` is an empty string - password = Buffer.from(password); - - const keyObject = keythereum.dump(password, key, salt, iv); - - const account = new Account(persist, { keyObject }); - - return account; + return account; + }); } toJSON () { diff --git a/js/src/api/local/accounts/accounts.js b/js/src/api/local/accounts/accounts.js index 576addcb1..1bce1329a 100644 --- a/js/src/api/local/accounts/accounts.js +++ b/js/src/api/local/accounts/accounts.js @@ -38,14 +38,22 @@ export default class Accounts { create (secret, password) { const privateKey = Buffer.from(secret.slice(2), 'hex'); - const account = Account.fromPrivateKey(this.persist, privateKey, password); - this._store.push(account); - this.lastAddress = account.address; + return Account.fromPrivateKey(this.persist, privateKey, 
password) + .then((account) => { + const { address } = account; - this.persist(); + if (this._store.find((account) => account.address === address)) { + throw new Error(`Account ${address} already exists!`); + } - return account.address; + this._store.push(account); + this.lastAddress = address; + + this.persist(); + + return account.address; + }); } set lastAddress (value) { @@ -73,28 +81,40 @@ export default class Accounts { remove (address, password) { address = address.toLowerCase(); + const account = this.get(address); + + if (!account) { + return false; + } + + return account.isValidPassword(password) + .then((isValid) => { + if (!isValid) { + return false; + } + + if (address === this.lastAddress) { + this.lastAddress = NULL_ADDRESS; + } + + this.removeUnsafe(address); + + return true; + }); + } + + removeUnsafe (address) { + address = address.toLowerCase(); + const index = this._store.findIndex((account) => account.address === address); if (index === -1) { - return false; - } - - const account = this._store[index]; - - if (!account.isValidPassword(password)) { - console.log('invalid password'); - return false; - } - - if (address === this.lastAddress) { - this.lastAddress = NULL_ADDRESS; + return; } this._store.splice(index, 1); this.persist(); - - return true; } mapArray (mapper) { diff --git a/js/src/api/local/ethkey/index.js b/js/src/api/local/ethkey/index.js index ac2efa72e..4539c8c50 100644 --- a/js/src/api/local/ethkey/index.js +++ b/js/src/api/local/ethkey/index.js @@ -14,31 +14,34 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -// Allow a web worker in the browser, with a fallback for Node.js -const hasWebWorkers = typeof Worker !== 'undefined'; -const KeyWorker = hasWebWorkers ? 
require('worker-loader!./worker') - : require('./worker').KeyWorker; +import workerPool from './workerPool'; -// Local accounts should never be used outside of the browser -export let keythereum = null; +export function createKeyObject (key, password) { + return workerPool.getWorker().action('createKeyObject', { key, password }) + .then((obj) => JSON.parse(obj)); +} -if (hasWebWorkers) { - require('keythereum/dist/keythereum'); +export function decryptPrivateKey (keyObject, password) { + return workerPool.getWorker() + .action('decryptPrivateKey', { keyObject, password }) + .then((privateKey) => { + if (privateKey) { + return Buffer.from(privateKey); + } - keythereum = window.keythereum; + return null; + }); } export function phraseToAddress (phrase) { - return phraseToWallet(phrase).then((wallet) => wallet.address); + return phraseToWallet(phrase) + .then((wallet) => wallet.address); } export function phraseToWallet (phrase) { - return new Promise((resolve, reject) => { - const worker = new KeyWorker(); - - worker.postMessage(phrase); - worker.onmessage = ({ data }) => { - resolve(data); - }; - }); + return workerPool.getWorker().action('phraseToWallet', phrase); +} + +export function verifySecret (secret) { + return workerPool.getWorker().action('verifySecret', secret); } diff --git a/js/src/api/local/ethkey/worker.js b/js/src/api/local/ethkey/worker.js index a472ee29a..3a0c34f7d 100644 --- a/js/src/api/local/ethkey/worker.js +++ b/js/src/api/local/ethkey/worker.js @@ -14,58 +14,107 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { keccak_256 as keccak256 } from 'js-sha3'; import secp256k1 from 'secp256k1/js'; +import { keccak_256 as keccak256 } from 'js-sha3'; + +const isWorker = typeof self !== 'undefined'; // Stay compatible between environments -if (typeof self !== 'object') { +if (!isWorker) { const scope = typeof global === 'undefined' ? 
window : global; scope.self = scope; } +// keythereum should never be used outside of the browser +let keythereum = null; + +if (isWorker) { + require('keythereum/dist/keythereum'); + + keythereum = self.keythereum; +} + +function route ({ action, payload }) { + if (action in actions) { + return actions[action](payload); + } + + return null; +} + +const actions = { + phraseToWallet (phrase) { + let secret = keccak256.array(phrase); + + for (let i = 0; i < 16384; i++) { + secret = keccak256.array(secret); + } + + while (true) { + secret = keccak256.array(secret); + + const secretBuf = Buffer.from(secret); + + if (secp256k1.privateKeyVerify(secretBuf)) { + // No compression, slice out last 64 bytes + const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); + const address = keccak256.array(publicBuf).slice(12); + + if (address[0] !== 0) { + continue; + } + + const wallet = { + secret: bytesToHex(secretBuf), + public: bytesToHex(publicBuf), + address: bytesToHex(address) + }; + + return wallet; + } + } + }, + + verifySecret (secret) { + const key = Buffer.from(secret.slice(2), 'hex'); + + return secp256k1.privateKeyVerify(key); + }, + + createKeyObject ({ key, password }) { + key = Buffer.from(key); + password = Buffer.from(password); + + const iv = keythereum.crypto.randomBytes(16); + const salt = keythereum.crypto.randomBytes(32); + const keyObject = keythereum.dump(password, key, salt, iv); + + return JSON.stringify(keyObject); + }, + + decryptPrivateKey ({ keyObject, password }) { + password = Buffer.from(password); + + try { + const key = keythereum.recover(password, keyObject); + + // Convert to array to safely send from the worker + return Array.from(key); + } catch (e) { + return null; + } + } +}; + function bytesToHex (bytes) { return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); } -// Logic ported from /ethkey/src/brain.rs -function phraseToWallet (phrase) { - let secret = keccak256.array(phrase); - - for (let 
i = 0; i < 16384; i++) { - secret = keccak256.array(secret); - } - - while (true) { - secret = keccak256.array(secret); - - const secretBuf = Buffer.from(secret); - - if (secp256k1.privateKeyVerify(secretBuf)) { - // No compression, slice out last 64 bytes - const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); - const address = keccak256.array(publicBuf).slice(12); - - if (address[0] !== 0) { - continue; - } - - const wallet = { - secret: bytesToHex(secretBuf), - public: bytesToHex(publicBuf), - address: bytesToHex(address) - }; - - return wallet; - } - } -} - self.onmessage = function ({ data }) { - const wallet = phraseToWallet(data); + const result = route(data); - postMessage(wallet); - close(); + postMessage(result); }; // Emulate a web worker in Node.js @@ -73,9 +122,9 @@ class KeyWorker { postMessage (data) { // Force async setTimeout(() => { - const wallet = phraseToWallet(data); + const result = route(data); - this.onmessage({ data: wallet }); + this.onmessage({ data: result }); }, 0); } diff --git a/js/src/api/local/ethkey/workerPool.js b/js/src/api/local/ethkey/workerPool.js new file mode 100644 index 000000000..ff5315898 --- /dev/null +++ b/js/src/api/local/ethkey/workerPool.js @@ -0,0 +1,61 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +// Allow a web worker in the browser, with a fallback for Node.js +const hasWebWorkers = typeof Worker !== 'undefined'; +const KeyWorker = hasWebWorkers ? require('worker-loader!./worker') + : require('./worker').KeyWorker; + +class WorkerContainer { + busy = false; + _worker = new KeyWorker(); + + action (action, payload) { + if (this.busy) { + throw new Error('Cannot issue an action on a busy worker!'); + } + + this.busy = true; + + return new Promise((resolve, reject) => { + this._worker.postMessage({ action, payload }); + this._worker.onmessage = ({ data }) => { + this.busy = false; + resolve(data); + }; + }); + } +} + +class WorkerPool { + pool = []; + + getWorker () { + let container = this.pool.find((container) => !container.busy); + + if (container) { + return container; + } + + container = new WorkerContainer(); + + this.pool.push(container); + + return container; + } +} + +export default new WorkerPool(); diff --git a/js/src/api/local/middleware.js b/js/src/api/local/middleware.js index d5997c60a..ece3fa17d 100644 --- a/js/src/api/local/middleware.js +++ b/js/src/api/local/middleware.js @@ -19,7 +19,7 @@ import accounts from './accounts'; import transactions from './transactions'; import { Middleware } from '../transport'; import { inNumber16 } from '../format/input'; -import { phraseToWallet, phraseToAddress } from './ethkey'; +import { phraseToWallet, phraseToAddress, verifySecret } from './ethkey'; import { randomPhrase } from '@parity/wordlist'; export default class LocalAccountsMiddleware extends Middleware { @@ -57,6 +57,21 @@ export default class LocalAccountsMiddleware extends Middleware { }); }); + register('parity_changePassword', ([address, oldPassword, newPassword]) => { + const account = accounts.get(address); + + return account.decryptPrivateKey(oldPassword) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + account.changePassword(privateKey, newPassword); + + return true; + }); + }); + register('parity_checkRequest', 
([id]) => { return transactions.hash(id) || Promise.resolve(null); }); @@ -84,6 +99,17 @@ export default class LocalAccountsMiddleware extends Middleware { }); }); + register('parity_newAccountFromSecret', ([secret, password]) => { + return verifySecret(secret) + .then((isValid) => { + if (!isValid) { + throw new Error('Invalid secret key'); + } + + return accounts.create(secret, password); + }); + }); + register('parity_setAccountMeta', ([address, meta]) => { accounts.get(address).meta = meta; @@ -127,6 +153,12 @@ export default class LocalAccountsMiddleware extends Middleware { return accounts.remove(address, password); }); + register('parity_testPassword', ([address, password]) => { + const account = accounts.get(address); + + return account.isValidPassword(password); + }); + register('signer_confirmRequest', ([id, modify, password]) => { const { gasPrice, @@ -137,30 +169,33 @@ export default class LocalAccountsMiddleware extends Middleware { data } = Object.assign(transactions.get(id), modify); - return this - .rpcRequest('parity_nextNonce', [from]) - .then((nonce) => { - const tx = new EthereumTx({ - nonce, - to, - data, - gasLimit: inNumber16(gasLimit), - gasPrice: inNumber16(gasPrice), - value: inNumber16(value) - }); - const account = accounts.get(from); + const account = accounts.get(from); - tx.sign(account.decryptPrivateKey(password)); - - const serializedTx = `0x${tx.serialize().toString('hex')}`; - - return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); - }) - .then((hash) => { - transactions.confirm(id, hash); - - return {}; + return Promise.all([ + this.rpcRequest('parity_nextNonce', [from]), + account.decryptPrivateKey(password) + ]) + .then(([nonce, privateKey]) => { + const tx = new EthereumTx({ + nonce, + to, + data, + gasLimit: inNumber16(gasLimit), + gasPrice: inNumber16(gasPrice), + value: inNumber16(value) }); + + tx.sign(privateKey); + + const serializedTx = `0x${tx.serialize().toString('hex')}`; + + return 
this.rpcRequest('eth_sendRawTransaction', [serializedTx]); + }) + .then((hash) => { + transactions.confirm(id, hash); + + return {}; + }); }); register('signer_rejectRequest', ([id]) => { diff --git a/js/src/api/transport/jsonRpcBase.js b/js/src/api/transport/jsonRpcBase.js index 46df718a7..573204c3e 100644 --- a/js/src/api/transport/jsonRpcBase.js +++ b/js/src/api/transport/jsonRpcBase.js @@ -80,12 +80,16 @@ export default class JsonRpcBase extends EventEmitter { const res = middleware.handle(method, params); if (res != null) { - const result = this._wrapSuccessResult(res); - const json = this.encode(method, params); + // If `res` isn't a promise, we need to wrap it + return Promise.resolve(res) + .then((res) => { + const result = this._wrapSuccessResult(res); + const json = this.encode(method, params); - Logging.send(method, params, { json, result }); + Logging.send(method, params, { json, result }); - return res; + return res; + }); } } diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.js b/js/src/modals/CreateAccount/NewAccount/newAccount.js index 04f2f272a..9c6be9f6e 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.js @@ -23,6 +23,7 @@ import { RadioButton, RadioButtonGroup } from 'material-ui/RadioButton'; import { Form, Input, IdentityIcon } from '~/ui'; import PasswordStrength from '~/ui/Form/PasswordStrength'; import { RefreshIcon } from '~/ui/Icons'; +import Loading from '~/ui/Loading'; import ChangeVault from '../ChangeVault'; import styles from '../createAccount.css'; @@ -170,7 +171,9 @@ export default class CreateAccount extends Component { const { accounts } = this.state; if (!accounts) { - return null; + return ( + + ); } const identities = Object @@ -205,6 +208,14 @@ export default class CreateAccount extends Component { createIdentities = () => { const { createStore } = this.props; + this.setState({ + accounts: null, + selectedAddress: '' + }); + + 
createStore.setAddress(''); + createStore.setPhrase(''); + return createStore .createIdentities() .then((accounts) => { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js index 87c7ba3fc..d6d38779f 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js @@ -58,12 +58,13 @@ describe('modals/CreateAccount/NewAccount', () => { return instance.componentWillMount(); }); - it('creates initial accounts', () => { - expect(Object.keys(instance.state.accounts).length).to.equal(7); + it('resets the accounts', () => { + expect(instance.state.accounts).to.be.null; + // expect(Object.keys(instance.state.accounts).length).to.equal(7); }); - it('sets the initial selected value', () => { - expect(instance.state.selectedAddress).to.equal(Object.keys(instance.state.accounts)[0]); + it('resets the initial selected value', () => { + expect(instance.state.selectedAddress).to.equal(''); }); }); }); diff --git a/js/src/modals/CreateAccount/store.js b/js/src/modals/CreateAccount/store.js index 52dddac80..9bc60d9af 100644 --- a/js/src/modals/CreateAccount/store.js +++ b/js/src/modals/CreateAccount/store.js @@ -69,7 +69,7 @@ export default class Store { return !(this.nameError || this.walletFileError); case 'fromNew': - return !(this.nameError || this.passwordRepeatError); + return !(this.nameError || this.passwordRepeatError) && this.hasAddress; case 'fromPhrase': return !(this.nameError || this.passwordRepeatError); @@ -85,6 +85,10 @@ export default class Store { } } + @computed get hasAddress () { + return !!(this.address); + } + @computed get passwordRepeatError () { return this.password === this.passwordRepeat ? 
null diff --git a/js/src/modals/CreateAccount/store.spec.js b/js/src/modals/CreateAccount/store.spec.js index b02f013b6..9d7bc10a2 100644 --- a/js/src/modals/CreateAccount/store.spec.js +++ b/js/src/modals/CreateAccount/store.spec.js @@ -329,6 +329,7 @@ describe('modals/CreateAccount/Store', () => { describe('createType === fromNew', () => { beforeEach(() => { store.setCreateType('fromNew'); + store.setAddress('0x0000000000000000000000000000000000000000'); }); it('returns true on no errors', () => { @@ -337,11 +338,13 @@ describe('modals/CreateAccount/Store', () => { it('returns false on nameError', () => { store.setName(''); + expect(store.canCreate).to.be.false; }); it('returns false on passwordRepeatError', () => { store.setPassword('testing'); + expect(store.canCreate).to.be.false; }); }); From 94bfe116aa2b5908803b0f11ec2c11e79a32accd Mon Sep 17 00:00:00 2001 From: maciejhirsz Date: Tue, 4 Apr 2017 11:49:36 +0200 Subject: [PATCH 86/91] CR fixes --- js/src/api/local/accounts/accounts.js | 6 ++++-- js/src/api/local/ethkey/index.js | 3 ++- js/src/api/local/ethkey/worker.js | 5 +---- js/src/api/local/middleware.js | 3 ++- js/src/api/util/format.js | 2 +- js/src/modals/CreateAccount/NewAccount/newAccount.spec.js | 1 - 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/js/src/api/local/accounts/accounts.js b/js/src/api/local/accounts/accounts.js index 1bce1329a..e7e5cc397 100644 --- a/js/src/api/local/accounts/accounts.js +++ b/js/src/api/local/accounts/accounts.js @@ -39,7 +39,8 @@ export default class Accounts { create (secret, password) { const privateKey = Buffer.from(secret.slice(2), 'hex'); - return Account.fromPrivateKey(this.persist, privateKey, password) + return Account + .fromPrivateKey(this.persist, privateKey, password) .then((account) => { const { address } = account; @@ -87,7 +88,8 @@ export default class Accounts { return false; } - return account.isValidPassword(password) + return account + .isValidPassword(password) .then((isValid) => { 
if (!isValid) { return false; diff --git a/js/src/api/local/ethkey/index.js b/js/src/api/local/ethkey/index.js index 4539c8c50..a6967da25 100644 --- a/js/src/api/local/ethkey/index.js +++ b/js/src/api/local/ethkey/index.js @@ -22,7 +22,8 @@ export function createKeyObject (key, password) { } export function decryptPrivateKey (keyObject, password) { - return workerPool.getWorker() + return workerPool + .getWorker() .action('decryptPrivateKey', { keyObject, password }) .then((privateKey) => { if (privateKey) { diff --git a/js/src/api/local/ethkey/worker.js b/js/src/api/local/ethkey/worker.js index 3a0c34f7d..00f4a0bed 100644 --- a/js/src/api/local/ethkey/worker.js +++ b/js/src/api/local/ethkey/worker.js @@ -16,6 +16,7 @@ import secp256k1 from 'secp256k1/js'; import { keccak_256 as keccak256 } from 'js-sha3'; +import { bytesToHex } from '~/api/util/format'; const isWorker = typeof self !== 'undefined'; @@ -107,10 +108,6 @@ const actions = { } }; -function bytesToHex (bytes) { - return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); -} - self.onmessage = function ({ data }) { const result = route(data); diff --git a/js/src/api/local/middleware.js b/js/src/api/local/middleware.js index ece3fa17d..36a8cd2cf 100644 --- a/js/src/api/local/middleware.js +++ b/js/src/api/local/middleware.js @@ -60,7 +60,8 @@ export default class LocalAccountsMiddleware extends Middleware { register('parity_changePassword', ([address, oldPassword, newPassword]) => { const account = accounts.get(address); - return account.decryptPrivateKey(oldPassword) + return account + .decryptPrivateKey(oldPassword) .then((privateKey) => { if (!privateKey) { return false; diff --git a/js/src/api/util/format.js b/js/src/api/util/format.js index c7594b692..61fc9d32c 100644 --- a/js/src/api/util/format.js +++ b/js/src/api/util/format.js @@ -17,7 +17,7 @@ import { range } from 'lodash'; export function bytesToHex (bytes) { - return '0x' + bytes.map((b) => ('0' + 
b.toString(16)).slice(-2)).join(''); + return '0x' + Buffer.from(bytes).toString('hex'); } export function cleanupValue (value, type) { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js index d6d38779f..935fe5b80 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js @@ -60,7 +60,6 @@ describe('modals/CreateAccount/NewAccount', () => { it('resets the accounts', () => { expect(instance.state.accounts).to.be.null; - // expect(Object.keys(instance.state.accounts).length).to.equal(7); }); it('resets the initial selected value', () => { From 9b212dc518a8c3d579e8be4b6da748e916351377 Mon Sep 17 00:00:00 2001 From: "Denis S. Soldatov aka General-Beck" Date: Tue, 4 Apr 2017 12:51:54 +0300 Subject: [PATCH 87/91] Update cov.sh remove src/test from EXCLUDE --- scripts/cov.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/cov.sh b/scripts/cov.sh index 13ab792c7..13d042905 100755 --- a/scripts/cov.sh +++ b/scripts/cov.sh @@ -32,7 +32,6 @@ $HOME/.cargo,\ $HOME/.multirust,\ rocksdb,\ secp256k1,\ -src/tests,\ util/json-tests,\ util/src/network/tests,\ ethcore/src/evm/tests,\ From 5f9dc132074c6c2b33d5e9eb632c932925fab6f1 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 4 Apr 2017 14:46:42 +0200 Subject: [PATCH 88/91] Straight download path in the readme (#5393) --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d92b68c58..f668f3218 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # [Parity](https://ethcore.io/parity.html) ### Fast, light, and robust Ethereum implementation +### [Download latest release](https://github.com/paritytech/parity/releases) + [![build status](https://gitlab.ethcore.io/parity/parity/badges/master/build.svg)](https://gitlab.ethcore.io/parity/parity/commits/master) [![Coverage 
Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url] ### Join the chat! @@ -22,7 +24,6 @@ Be sure to check out [our wiki][wiki-url] for more information. [doc-url]: https://paritytech.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/paritytech/parity/wiki -**Parity requires Rust version 1.15.0 to build** ---- @@ -45,14 +46,14 @@ of RPC APIs. If you run into an issue while using parity, feel free to file one in this repository or hop on our [gitter chat room][gitter-url] to ask a question. We are glad to help! -Parity's current release is 1.5. You can download it at https://parity.io or follow the instructions +Parity's current release is 1.6. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source. ---- ## Build dependencies -Parity is fully compatible with Stable Rust. +**Parity requires Rust version 1.16.0 to build** We recommend installing Rust through [rustup](https://www.rustup.rs/). 
If you don't already have rustup, you can install it like this: @@ -80,7 +81,7 @@ Once you have rustup, install parity or download and build from source ---- -## Quick install +## Quick build and install ```bash cargo install --git https://github.com/paritytech/parity.git parity From 8d0fde6f608f7794e8c18b18e762f77741473013 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 5 Apr 2017 09:06:09 +0000 Subject: [PATCH 89/91] [ci skip] js-precompiled 20170405-090226 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f36aa132e..d50bf5c3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1755,7 +1755,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#04143247380a7a9bce112c9467636684d8214973" +source = "git+https://github.com/paritytech/js-precompiled.git#9bfc6f3dfca2c337c53084bedcc65c2b526927a1" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 0585a1681..6e8b84f6d 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.45", + "version": "1.7.46", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 0d8a2c8c44cf1655e4580bbe6bae0314662ffc49 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 11:30:51 +0200 Subject: [PATCH 90/91] CHT key optimization --- ethcore/light/src/client/header_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 1c218204b..d4ea8d107 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -119,7 +119,7 @@ impl Decodable for Entry { } fn cht_key(number: u64) -> String { - format!("canonical_{}", 
number) + format!("{:08x}_canonical", number) } fn era_key(number: u64) -> String { From e2dfea8c12978619db30d8f44183ff3909f215b7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 11:57:29 +0200 Subject: [PATCH 91/91] set gas limit before proving transactions --- ethcore/light/src/on_demand/request.rs | 4 +++- ethcore/src/client/client.rs | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index cda1d6feb..d3bb06888 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -243,12 +243,14 @@ impl TransactionProof { pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { let root = self.header.state_root(); + let mut env_info = self.env_info.clone(); + env_info.gas_limit = self.tx.gas.clone(); state::check_proof( state_items, root, &self.tx, &*self.engine, - &self.env_info, + &env_info, ) } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3accc777f..4bd29d100 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -395,7 +395,7 @@ impl Client { if header.number() < self.engine().params().validate_receipts_transition && header.receipts_root() != locked_block.block().header().receipts_root() { locked_block = locked_block.strip_receipts(); } - + // Final Verification if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); @@ -1627,10 +1627,12 @@ impl ::client::ProvingBlockChainClient for Client { } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { - let (state, env_info) = match (self.state_at(id), self.env_info(id)) { + let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) { (Some(s), Some(e)) => (s, e), _ => return None, }; 
+ + env_info.gas_limit = transaction.gas.clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone(); let backend = state::backend::Proving::new(jdb.as_hashdb_mut());