From 36556016933acce95c63624535461a7ebefd0f5f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 17:40:03 +0100 Subject: [PATCH 01/89] return errors on database corruption --- ethcore/src/block.rs | 3 +- ethcore/src/client/client.rs | 60 +++++---- ethcore/src/ethereum/ethash.rs | 10 +- ethcore/src/evm/evm.rs | 15 ++- ethcore/src/evm/ext.rs | 19 +-- ethcore/src/evm/interpreter/gasometer.rs | 12 +- ethcore/src/evm/interpreter/mod.rs | 20 +-- ethcore/src/executive.rs | 40 +++--- ethcore/src/externalities.rs | 65 ++++++--- ethcore/src/miner/miner.rs | 32 +++-- ethcore/src/miner/mod.rs | 10 +- ethcore/src/state/account.rs | 33 ++--- ethcore/src/state/mod.rs | 164 ++++++++++++----------- ethcore/src/types/executed.rs | 23 +++- ethcore/src/types/trace_types/error.rs | 14 +- 15 files changed, 290 insertions(+), 230 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index f2eff0d04..3626fdd3a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -540,7 +540,8 @@ pub fn enact( { if ::log::max_log_level() >= ::log::LogLevel::Trace { let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(), factories.clone())?; - trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", header.number(), s.root(), header.author(), s.balance(&header.author())); + trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", + header.number(), s.root(), header.author(), s.balance(&header.author())?); } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 96b25b351..7f209bad1 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -890,17 +890,20 @@ impl BlockChainClient for Client { let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let sender = t.sender(); - let balance = state.balance(&sender); + let balance = state.balance(&sender).map_err(|_| CallError::StateCorrupt)?; let needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(|_| CallError::StateCorrupt)?; } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?; // TODO gav move this into Executive. - ret.state_diff = original_state.map(|original| state.diff_from(original)); + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } Ok(ret) } @@ -921,7 +924,7 @@ impl BlockChainClient for Client { // that's just a copy of the state. 
let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let sender = t.sender(); - let balance = original_state.balance(&sender); + let balance = original_state.balance(&sender).map_err(ExecutionError::from)?; let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false }; let cond = |gas| { @@ -933,27 +936,29 @@ impl BlockChainClient for Client { let needed_balance = tx.value + tx.gas * tx.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(ExecutionError::from)?; } - Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) + Ok(Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm) .transact(&tx, options.clone()) .map(|r| r.exception.is_none()) - .unwrap_or(false) + .unwrap_or(false)) }; let mut upper = header.gas_limit(); - if !cond(upper) { + if !cond(upper)? { // impossible at block gas limit - try `UPPER_CEILING` instead. // TODO: consider raising limit by powers of two. upper = UPPER_CEILING.into(); - if !cond(upper) { + if !cond(upper)? { trace!(target: "estimate_gas", "estimate_gas failed with {}", upper); - return Err(CallError::Execution(ExecutionError::Internal)) + let err = ExecutionError::Internal(format!("Requires higher than upper limit of {}", upper)); + return Err(err.into()) } } let lower = t.gas_required(&self.engine.schedule(&env_info)).into(); - if cond(lower) { + if cond(lower)? { trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower); return Ok(lower) } @@ -961,23 +966,25 @@ impl BlockChainClient for Client { /// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`. /// Returns the lowest value between `lower` and `upper` for which `cond` returns true. /// We assert: `cond(lower) = false`, `cond(upper) = true` - fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> U256 where F: FnMut(U256) -> bool { + fn binary_chop(mut lower: U256, mut upper: U256, mut cond: F) -> Result + where F: FnMut(U256) -> Result + { while upper - lower > 1.into() { let mid = (lower + upper) / 2.into(); trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); - let c = cond(mid); + let c = cond(mid)?; match c { true => upper = mid, false => lower = mid, }; trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); } - upper + Ok(upper) } // binary chop to non-excepting call with gas somewhere between 21000 and block gas limit trace!(target: "estimate_gas", "estimate_gas chopping {} .. 
{}", lower, upper); - Ok(binary_chop(lower, upper, cond)) + binary_chop(lower, upper, cond) } fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { @@ -1006,17 +1013,16 @@ impl BlockChainClient for Client { let rest = txs.split_off(address.index); for t in txs { let t = SignedTransaction::new(t).expect(PROOF); - match Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, Default::default()) { - Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; } - Err(ee) => { return Err(CallError::Execution(ee)) } - } + let x = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, Default::default())?; + env_info.gas_used = env_info.gas_used + x.gas_used; } let first = rest.into_iter().next().expect("We split off < `address.index`; Length is checked earlier; qed"); let t = SignedTransaction::new(first).expect(PROOF); let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, options)?; - ret.state_diff = original_state.map(|original| state.diff_from(original)); - + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?) + } Ok(ret) } @@ -1108,11 +1114,11 @@ impl BlockChainClient for Client { } fn nonce(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).map(|s| s.nonce(address)) + self.state_at(id).and_then(|s| s.nonce(address).ok()) } fn storage_root(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).and_then(|s| s.storage_root(address)) + self.state_at(id).and_then(|s| s.storage_root(address).ok()).and_then(|x| x) } fn block_hash(&self, id: BlockId) -> Option { @@ -1121,15 +1127,15 @@ impl BlockChainClient for Client { } fn code(&self, address: &Address, id: BlockId) -> Option> { - self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone())) + self.state_at(id).and_then(|s| s.code(address).ok()).map(|c| c.map(|c| (&*c).clone())) } fn balance(&self, address: &Address, id: BlockId) -> Option { - self.state_at(id).map(|s| s.balance(address)) + self.state_at(id).and_then(|s| s.balance(address).ok()) } fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option { - self.state_at(id).map(|s| s.storage_at(address, position)) + self.state_at(id).and_then(|s| s.storage_at(address, position).ok()) } fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option> { @@ -1182,7 +1188,7 @@ impl BlockChainClient for Client { }; let root = match state.storage_root(account) { - Some(root) => root, + Ok(Some(root)) => root, _ => return None, }; diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 6c7b91d39..3c9196a85 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -215,8 +215,14 @@ impl Engine for Ethash { // if block.fields().header.gas_limit() <= 4_000_000.into() { let mut state = block.fields_mut().state; for child in &self.ethash_params.dao_hardfork_accounts { - let b = state.balance(child); - state.transfer_balance(child, &self.ethash_params.dao_hardfork_beneficiary, &b, CleanupMode::NoEmpty); + let beneficiary = &self.ethash_params.dao_hardfork_beneficiary; + let res = state.balance(child) + .and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty)); + + if let Err(e) = res { + warn!("Unable to apply DAO hardfork due to database corruption."); + warn!("Your 
node is now likely out of consensus.");
+			}
 		}
 		// }
 	}
diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs
index 420ebb6a0..09a93f087 100644
--- a/ethcore/src/evm/evm.rs
+++ b/ethcore/src/evm/evm.rs
@@ -17,12 +17,12 @@
 //! Evm interface.
 
 use std::{ops, cmp, fmt};
-use util::{U128, U256, U512, Uint};
+use util::{U128, U256, U512, Uint, trie};
 use action_params::ActionParams;
 use evm::Ext;
 
 /// Evm errors.
-#[derive(Debug, Clone, Copy, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum Error {
 	/// `OutOfGas` is returned when transaction execution runs out of gas.
 	/// The state should be reverted to the state from before the
@@ -61,8 +61,13 @@ pub enum Error {
 	},
 	/// Returned on evm internal error. Should never be ignored during development.
 	/// Likely to cause consensus issues.
-	#[allow(dead_code)] // created only by jit
-	Internal,
+	Internal(String),
+}
+
+impl From<Box<trie::TrieError>> for Error {
+	fn from(err: Box<trie::TrieError>) -> Self {
+		Error::Internal(format!("Internal error: {}", err))
+	}
 }
 
 impl fmt::Display for Error {
@@ -74,7 +79,7 @@
 			BadInstruction { .. } => "Bad instruction",
 			StackUnderflow { .. } => "Stack underflow",
 			OutOfStack { .. } => "Out of stack",
-			Internal => "Internal error",
+			Internal(ref msg) => msg,
 		};
 		message.fmt(f)
 	}
diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs
index e2578cc68..352ffb7d9 100644
--- a/ethcore/src/evm/ext.rs
+++ b/ethcore/src/evm/ext.rs
@@ -42,24 +42,25 @@ pub enum MessageCallResult {
 }
 
 /// Externalities interface for EVMs
+// TODO: [rob] associated error type instead of `trie::Result`. Not all EVMs are trie powered.
 pub trait Ext {
 	/// Returns a value for given key.
-	fn storage_at(&self, key: &H256) -> H256;
+	fn storage_at(&self, key: &H256) -> trie::Result<H256>;
 
 	/// Stores a value for given key.
-	fn set_storage(&mut self, key: H256, value: H256);
+	fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()>;
 
 	/// Determine whether an account exists.
-	fn exists(&self, address: &Address) -> bool;
+	fn exists(&self, address: &Address) -> trie::Result<bool>;
 
 	/// Determine whether an account exists and is not null (zero balance/nonce, no code).
-	fn exists_and_not_null(&self, address: &Address) -> bool;
+	fn exists_and_not_null(&self, address: &Address) -> trie::Result<bool>;
 
 	/// Balance of the origin account.
-	fn origin_balance(&self) -> U256;
+	fn origin_balance(&self) -> trie::Result<U256>;
 
 	/// Returns address balance.
-	fn balance(&self, address: &Address) -> U256;
+	fn balance(&self, address: &Address) -> trie::Result<U256>;
 
 	/// Returns the hash of one of the 256 most recent complete blocks.
 	fn blockhash(&self, number: &U256) -> H256;
@@ -87,10 +88,10 @@
 	) -> MessageCallResult;
 
 	/// Returns code at given address
-	fn extcode(&self, address: &Address) -> Arc<Bytes>;
+	fn extcode(&self, address: &Address) -> trie::Result<Arc<Bytes>>;
 
 	/// Returns code size at given address
-	fn extcodesize(&self, address: &Address) -> usize;
+	fn extcodesize(&self, address: &Address) -> trie::Result<usize>;
 
 	/// Creates log entry with given topics and data
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]);
 
 	/// Should be called when transaction calls `RETURN` opcode.
 	/// Returns gas_left if cost of returning the data is not too high.
 	fn ret(self, gas: &U256, data: &[u8]) -> evm::Result;
 
 	/// Should be called when contract commits suicide.
 	/// Address to which funds should be refunded.
-	fn suicide(&mut self, refund_address: &Address);
+	fn suicide(&mut self, refund_address: &Address) -> trie::Result<()>;
 
 	/// Returns schedule.
fn schedule(&self) -> &Schedule; diff --git a/ethcore/src/evm/interpreter/gasometer.rs b/ethcore/src/evm/interpreter/gasometer.rs index 5c96c3c05..9086200fa 100644 --- a/ethcore/src/evm/interpreter/gasometer.rs +++ b/ethcore/src/evm/interpreter/gasometer.rs @@ -123,7 +123,7 @@ impl Gasometer { instructions::SSTORE => { let address = H256::from(stack.peek(0)); let newval = stack.peek(1); - let val = U256::from(&*ext.storage_at(&address)); + let val = U256::from(&*ext.storage_at(&address)?); let gas = if val.is_zero() && !newval.is_zero() { schedule.sstore_set_gas @@ -146,12 +146,12 @@ impl Gasometer { instructions::SUICIDE => { let mut gas = Gas::from(schedule.suicide_gas); - let is_value_transfer = !ext.origin_balance().is_zero(); + let is_value_transfer = !ext.origin_balance()?.is_zero(); let address = u256_to_address(stack.peek(0)); if ( - !schedule.no_empty && !ext.exists(&address) + !schedule.no_empty && !ext.exists(&address)? ) || ( - schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address) + schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)? ) { gas = overflowing!(gas.overflow_add(schedule.suicide_to_new_account_cost.into())); } @@ -198,9 +198,9 @@ impl Gasometer { let is_value_transfer = !stack.peek(2).is_zero(); if instruction == instructions::CALL && ( - (!schedule.no_empty && !ext.exists(&address)) + (!schedule.no_empty && !ext.exists(&address)?) || - (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)) + (schedule.no_empty && is_value_transfer && !ext.exists_and_not_null(&address)?) ) { gas = overflowing!(gas.overflow_add(schedule.call_new_account_gas.into())); } diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index bc3caa084..79304793e 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -273,7 +273,7 @@ impl Interpreter { let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); let contract_code = self.mem.read_slice(init_off, init_size); - let can_create = ext.balance(¶ms.address) >= endowment && ext.depth() < ext.schedule().max_depth; + let can_create = ext.balance(¶ms.address)? >= endowment && ext.depth() < ext.schedule().max_depth; if !can_create { stack.push(U256::zero()); @@ -319,11 +319,11 @@ impl Interpreter { // Get sender & receive addresses, check if we have balance let (sender_address, receive_address, has_balance, call_type) = match instruction { instructions::CALL => { - let has_balance = ext.balance(¶ms.address) >= value.expect("value set for all but delegate call; qed"); + let has_balance = ext.balance(¶ms.address)? >= value.expect("value set for all but delegate call; qed"); (¶ms.address, &code_address, has_balance, CallType::Call) }, instructions::CALLCODE => { - let has_balance = ext.balance(¶ms.address) >= value.expect("value set for all but delegate call; qed"); + let has_balance = ext.balance(¶ms.address)? 
>= value.expect("value set for all but delegate call; qed"); (¶ms.address, ¶ms.address, has_balance, CallType::CallCode) }, instructions::DELEGATECALL => (¶ms.sender, ¶ms.address, true, CallType::DelegateCall), @@ -366,7 +366,7 @@ impl Interpreter { }, instructions::SUICIDE => { let address = stack.pop_back(); - ext.suicide(&u256_to_address(&address)); + ext.suicide(&u256_to_address(&address))?; return Ok(InstructionResult::StopExecution); }, instructions::LOG0...instructions::LOG4 => { @@ -410,19 +410,19 @@ impl Interpreter { }, instructions::SLOAD => { let key = H256::from(&stack.pop_back()); - let word = U256::from(&*ext.storage_at(&key)); + let word = U256::from(&*ext.storage_at(&key)?); stack.push(word); }, instructions::SSTORE => { let address = H256::from(&stack.pop_back()); let val = stack.pop_back(); - let current_val = U256::from(&*ext.storage_at(&address)); + let current_val = U256::from(&*ext.storage_at(&address)?); // Increase refund for clear if !self.is_zero(¤t_val) && self.is_zero(&val) { ext.inc_sstore_clears(); } - ext.set_storage(address, H256::from(&val)); + ext.set_storage(address, H256::from(&val))?; }, instructions::PC => { stack.push(U256::from(code.position - 1)); @@ -438,7 +438,7 @@ impl Interpreter { }, instructions::BALANCE => { let address = u256_to_address(&stack.pop_back()); - let balance = ext.balance(&address); + let balance = ext.balance(&address)?; stack.push(balance); }, instructions::CALLER => { @@ -474,7 +474,7 @@ impl Interpreter { }, instructions::EXTCODESIZE => { let address = u256_to_address(&stack.pop_back()); - let len = ext.extcodesize(&address); + let len = ext.extcodesize(&address)?; stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { @@ -485,7 +485,7 @@ impl Interpreter { }, instructions::EXTCODECOPY => { let address = u256_to_address(&stack.pop_back()); - let code = ext.extcode(&address); + let code = ext.extcode(&address)?; self.copy_data_to_memory(stack, &code); }, instructions::GASPRICE => { diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 37b53202a..d287857a0 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -123,7 +123,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { mut vm_tracer: V ) -> Result where T: Tracer, V: VMTracer { let sender = t.sender(); - let nonce = self.state.nonce(&sender); + let nonce = self.state.nonce(&sender)?; let schedule = self.engine.schedule(self.info); let base_gas_required = U256::from(t.gas_required(&schedule)); @@ -149,7 +149,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } // TODO: we might need bigints here, or at least check overflows. - let balance = self.state.balance(&sender); + let balance = self.state.balance(&sender)?; let gas_cost = t.gas.full_mul(t.gas_price); let total_cost = U512::from(t.value) + gas_cost; @@ -160,8 +160,8 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } // NOTE: there can be no invalid transactions from this point. 
- self.state.inc_nonce(&sender); - self.state.sub_balance(&sender, &U256::from(gas_cost)); + self.state.inc_nonce(&sender)?; + self.state.sub_balance(&sender, &U256::from(gas_cost))?; let mut substate = Substate::new(); @@ -192,8 +192,8 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { gas: init_gas, gas_price: t.gas_price, value: ActionValue::Transfer(t.value), - code: self.state.code(address), - code_hash: self.state.code_hash(address), + code: self.state.code(address)?, + code_hash: self.state.code_hash(address)?, data: Some(t.data.clone()), call_type: CallType::Call, }; @@ -257,7 +257,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // at first, transfer value to destination if let ActionValue::Transfer(val) = params.value { - self.state.transfer_balance(¶ms.sender, ¶ms.address, &val, substate.to_cleanup_mode(&schedule)); + self.state.transfer_balance(¶ms.sender, ¶ms.address, &val, substate.to_cleanup_mode(&schedule))?; } trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); @@ -322,13 +322,13 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let traces = subtracer.traces(); match res { - Ok(gas_left) => tracer.trace_call( + Ok(ref gas_left) => tracer.trace_call( trace_info, - gas - gas_left, + gas - *gas_left, trace_output, traces ), - Err(e) => tracer.trace_failed_call(trace_info, traces, e.into()), + Err(ref e) => tracer.trace_failed_call(trace_info, traces, e.into()), }; trace!(target: "executive", "substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate); @@ -365,9 +365,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // create contract and transfer value to it if necessary let schedule = self.engine.schedule(self.info); let nonce_offset = if schedule.no_empty {1} else {0}.into(); - let prev_bal = self.state.balance(¶ms.address); + let prev_bal = self.state.balance(¶ms.address)?; if let ActionValue::Transfer(val) = params.value { - self.state.sub_balance(¶ms.sender, &val); + self.state.sub_balance(¶ms.sender, &val)?; self.state.new_contract(¶ms.address, val + prev_bal, nonce_offset); } else { self.state.new_contract(¶ms.address, prev_bal, nonce_offset); @@ -388,14 +388,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { vm_tracer.done_subtrace(subvmtracer); match res { - Ok(gas_left) => tracer.trace_create( + Ok(ref gas_left) => tracer.trace_create( trace_info, - gas - gas_left, + gas - *gas_left, trace_output, created, subtracer.traces() ), - Err(e) => tracer.trace_failed_create(trace_info, subtracer.traces(), e.into()) + Err(ref e) => tracer.trace_failed_create(trace_info, subtracer.traces(), e.into()) }; self.enact_result(&res, substate, unconfirmed_substate); @@ -435,9 +435,9 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let sender = t.sender(); trace!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, sender); // Below: NoEmpty is safe since the sender must already be non-null to have sent this transaction - self.state.add_balance(&sender, &refund_value, CleanupMode::NoEmpty); + self.state.add_balance(&sender, &refund_value, CleanupMode::NoEmpty)?; trace!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); - self.state.add_balance(&self.info.author, &fees_value, substate.to_cleanup_mode(&schedule)); + self.state.add_balance(&self.info.author, &fees_value, substate.to_cleanup_mode(&schedule))?; // perform suicides for address in &substate.suicides { @@ -446,13 +446,13 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // perform 
garbage-collection for address in &substate.garbage { - if self.state.exists(address) && !self.state.exists_and_not_null(address) { + if self.state.exists(address)? && !self.state.exists_and_not_null(address)? { self.state.kill_account(address); } } match result { - Err(evm::Error::Internal) => Err(ExecutionError::Internal), + Err(evm::Error::Internal(msg)) => Err(ExecutionError::Internal(msg)), Err(exception) => { Ok(Executed { exception: Some(exception), @@ -495,7 +495,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { | Err(evm::Error::OutOfStack {..}) => { self.state.revert_to_checkpoint(); }, - Ok(_) | Err(evm::Error::Internal) => { + Ok(_) | Err(evm::Error::Internal(_)) => { self.state.discard_checkpoint(); substate.accrue(un_substate); } diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 49ed2261e..db4a587d1 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -108,25 +108,25 @@ impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B> impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> where T: Tracer, V: VMTracer, B: StateBackend { - fn storage_at(&self, key: &H256) -> H256 { + fn storage_at(&self, key: &H256) -> trie::Result { self.state.storage_at(&self.origin_info.address, key) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.state.set_storage(&self.origin_info.address, key, value) } - fn exists(&self, address: &Address) -> bool { + fn exists(&self, address: &Address) -> trie::Result { self.state.exists(address) } - fn exists_and_not_null(&self, address: &Address) -> bool { + fn exists_and_not_null(&self, address: &Address) -> trie::Result { self.state.exists_and_not_null(address) } - fn origin_balance(&self) -> U256 { self.balance(&self.origin_info.address) } + fn origin_balance(&self) -> trie::Result { self.balance(&self.origin_info.address) } - fn balance(&self, address: &Address) -> U256 { + fn balance(&self, address: &Address) -> trie::Result { self.state.balance(address) } @@ -149,7 +149,13 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { // create new contract address - let address = contract_address(&self.origin_info.address, &self.state.nonce(&self.origin_info.address)); + let address = match self.state.nonce(&self.origin_info.address) { + Ok(nonce) => contract_address(&self.origin_info.address, &nonce), + Err(e) => { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return ContractCreateResult::Failed + } + }; // prepare the params let params = ActionParams { @@ -166,7 +172,10 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> call_type: CallType::None, }; - self.state.inc_nonce(&self.origin_info.address); + if let Err(e) = self.state.inc_nonce(&self.origin_info.address) { + debug!(target: "ext", "Database corruption encountered: {:?}", e); + return ContractCreateResult::Failed + } let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth); // TODO: handle internal error separately @@ -191,6 +200,14 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> ) -> MessageCallResult { trace!(target: "externalities", "call"); + let code_res = self.state.code(code_address) + .and_then(|code| self.state.code_hash(code_address).map(|hash| (code, hash))); + + let (code, code_hash) = match code_res { + Ok((code, hash)) => (code, 
hash), + Err(_) => return MessageCallResult::Failed, + }; + let mut params = ActionParams { sender: sender_address.clone(), address: receive_address.clone(), @@ -199,8 +216,8 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price, - code: self.state.code(code_address), - code_hash: self.state.code_hash(code_address), + code: code, + code_hash: code_hash, data: Some(data.to_vec()), call_type: call_type, }; @@ -217,12 +234,12 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> } } - fn extcode(&self, address: &Address) -> Arc { - self.state.code(address).unwrap_or_else(|| Arc::new(vec![])) + fn extcode(&self, address: &Address) -> trie::Result> { + Ok(self.state.code(address)?.unwrap_or_else(|| Arc::new(vec![]))) } - fn extcodesize(&self, address: &Address) -> usize { - self.state.code_size(address).unwrap_or(0) + fn extcodesize(&self, address: &Address) -> trie::Result { + Ok(self.state.code_size(address)?.unwrap_or(0)) } #[cfg_attr(feature="dev", allow(match_ref_pats))] @@ -257,10 +274,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> handle_copy(copy); - let mut code = vec![]; - code.extend_from_slice(data); - - self.state.init_code(&self.origin_info.address, code); + self.state.init_code(&self.origin_info.address, data.to_vec())?; Ok(*gas - return_cost) } } @@ -277,19 +291,26 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> }); } - fn suicide(&mut self, refund_address: &Address) { + fn suicide(&mut self, refund_address: &Address) -> trie::Result<()> { let address = self.origin_info.address.clone(); - let balance = self.balance(&address); + let balance = self.balance(&address)?; if &address == refund_address { // TODO [todr] To be consistent with CPP client we set balance to 0 in that case. - self.state.sub_balance(&address, &balance); + self.state.sub_balance(&address, &balance)?; } else { trace!(target: "ext", "Suiciding {} -> {} (xfer: {})", address, refund_address, balance); - self.state.transfer_balance(&address, refund_address, &balance, self.substate.to_cleanup_mode(&self.schedule)); + self.state.transfer_balance( + &address, + refund_address, + &balance, + self.substate.to_cleanup_mode(&self.schedule) + )?; } self.tracer.trace_suicide(address, balance, refund_address.clone()); self.substate.suicides.insert(address); + + Ok(()) } fn schedule(&self) -> &Schedule { diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 9cfd4a4a0..432c58025 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -711,7 +711,7 @@ impl MinerService for Miner { let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; let sender = t.sender(); - let balance = state.balance(&sender); + let balance = state.balance(&sender).map_err(ExecutionError::from)?; let needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance @@ -721,7 +721,9 @@ impl MinerService for Miner { let mut ret = Executive::new(&mut state, &env_info, &*self.engine, client.vm_factory()).transact(t, options)?; // TODO gav move this into Executive. 
- ret.state_diff = original_state.map(|original| state.diff_from(original)); + if let Some(original) = original_state { + ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); + } Ok(ret) }, @@ -729,35 +731,37 @@ impl MinerService for Miner { } } - fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { + // TODO: The `chain.latest_x` actually aren't infallible, they just panic on corruption. + // TODO: return trie::Result here, or other. + fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_balance(address), - |b| b.block().fields().state.balance(address) + || Some(chain.latest_balance(address)), + |b| b.block().fields().state.balance(address).ok(), ) } - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { + fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_storage_at(address, position), - |b| b.block().fields().state.storage_at(address, position) + || Some(chain.latest_storage_at(address, position)), + |b| b.block().fields().state.storage_at(address, position).ok(), ) } - fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { + fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_nonce(address), - |b| b.block().fields().state.nonce(address) + || Some(chain.latest_nonce(address)), + |b| b.block().fields().state.nonce(address).ok(), ) } - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { + fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option> { self.from_pending_block( chain.chain_info().best_block_number, - || chain.latest_code(address), - |b| b.block().fields().state.code(address).map(|c| (*c).clone()) + || Some(chain.latest_code(address)), + |b| b.block().fields().state.code(address).ok().map(|c| c.map(|c| (&*c).clone())) ) } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index eee9d1c5c..e7dc52055 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -62,7 +62,7 @@ pub use self::work_notify::NotifyWork; pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOptions}; use std::collections::BTreeMap; -use util::{H256, U256, Address, Bytes}; +use util::{H256, U256, Address, Bytes, trie}; use client::{MiningBlockChainClient, Executed, CallAnalytics}; use block::ClosedBlock; use header::BlockNumber; @@ -181,19 +181,19 @@ pub trait MinerService : Send + Sync { fn sensible_gas_limit(&self) -> U256 { 21000.into() } /// Latest account balance in pending state. - fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256; + fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; /// Call into contract code using pending state. fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result; /// Get storage value in pending state. - fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256; + fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option; /// Get account nonce in pending state. 
- fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256; + fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; /// Get contract code in pending state. - fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option; + fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option>; } /// Mining status diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 6e5297bc4..acd1591f7 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -169,22 +169,16 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. /// Takes modifed storage into account. - pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { + pub fn storage_at(&self, db: &HashDB, key: &H256) -> trie::Result { if let Some(value) = self.cached_storage_at(key) { - return value; + return Ok(value); } - let db = SecTrieDB::new(db, &self.storage_root) - .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ - SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ - using it will not fail."); + let db = SecTrieDB::new(db, &self.storage_root)?; - let item: U256 = match db.get_with(key, ::rlp::decode) { - Ok(x) => x.unwrap_or_else(U256::zero), - Err(e) => panic!("Encountered potential DB corruption: {}", e), - }; + let item: U256 = db.get_with(key, ::rlp::decode)?.unwrap_or_else(U256::zero); let value: H256 = item.into(); self.storage_cache.borrow_mut().insert(key.clone(), value.clone()); - value + Ok(value) } /// Get cached storage value if any. Returns `None` if the @@ -345,24 +339,19 @@ impl Account { } /// Commit the `storage_changes` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) { - let mut t = trie_factory.from_existing(db, &mut self.storage_root) - .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ - SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ - using it will not fail."); + pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> trie::Result<()> { + let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; for (k, v) in self.storage_changes.drain() { // cast key and value to trait type, // so we can call overloaded `to_bytes` method - let res = match v.is_zero() { - true => t.remove(&k), - false => t.insert(&k, &encode(&U256::from(&*v))), + match v.is_zero() { + true => t.remove(&k)?, + false => t.insert(&k, &encode(&U256::from(&*v)))?, }; - if let Err(e) = res { - warn!("Encountered potential DB corruption: {}", e); - } self.storage_cache.borrow_mut().insert(k, v); } + Ok(()) } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 1d26ea30f..b88a6a9f1 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -37,6 +37,7 @@ use state_db::StateDB; use util::*; +use util::trie; use util::trie::recorder::Recorder; mod account; @@ -362,37 +363,37 @@ impl State { } /// Determine whether an account exists. - pub fn exists(&self, a: &Address) -> bool { + pub fn exists(&self, a: &Address) -> trie::Result { // Bloom filter does not contain empty accounts, so it is important here to // check if account exists in the database directly before EIP-161 is in effect. 
self.ensure_cached(a, RequireCache::None, false, |a| a.is_some()) } /// Determine whether an account exists and if not empty. - pub fn exists_and_not_null(&self, a: &Address) -> bool { + pub fn exists_and_not_null(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null())) } /// Get the balance of account `a`. - pub fn balance(&self, a: &Address) -> U256 { + pub fn balance(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(U256::zero(), |account| *account.balance())) } /// Get the nonce of account `a`. - pub fn nonce(&self, a: &Address) -> U256 { + pub fn nonce(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce())) } /// Get the storage root of account `a`. - pub fn storage_root(&self, a: &Address) -> Option { + pub fn storage_root(&self, a: &Address) -> trie::Result> { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().and_then(|account| account.storage_root().cloned())) } /// Mutate storage of account `address` so that it is `value` for `key`. - pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { + pub fn storage_at(&self, address: &Address, key: &H256) -> trie::Result { // Storage key search and update works like this: // 1. If there's an entry for the account in the local cache check for the key and return it if found. // 2. If there's an entry for the account in the global cache check for the key or load it into that account. @@ -406,42 +407,46 @@ impl State { match maybe_acc.account { Some(ref account) => { if let Some(value) = account.cached_storage_at(key) { - return value; + return Ok(value); } else { local_account = Some(maybe_acc); } }, - _ => return H256::new(), + _ => return Ok(H256::new()), } } // check the global cache and and cache storage key there if found, - // otherwise cache the account localy and cache storage key there. - if let Some(result) = self.db.get_cached(address, |acc| acc.map_or(H256::new(), |a| { + let trie_res = self.db.get_cached(address, |acc| match acc { + None => Ok(H256::new()), + Some(a) => { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); a.storage_at(account_db.as_hashdb(), key) - })) { - return result; + } + }); + + match trie_res { + None => {} + Some(res) => return res, } + + // otherwise cache the account localy and cache storage key there. 
if let Some(ref mut acc) = local_account { if let Some(ref account) = acc.account { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address)); return account.storage_at(account_db.as_hashdb(), key) } else { - return H256::new() + return Ok(H256::new()) } } } // check if the account could exist before any requests to trie - if self.db.is_known_null(address) { return H256::zero() } + if self.db.is_known_null(address) { return Ok(H256::zero()) } // account is not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = match db.get_with(address, Account::from_rlp) { - Ok(acc) => acc, - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; - let r = maybe_acc.as_ref().map_or(H256::new(), |a| { + let maybe_acc = db.get_with(address, Account::from_rlp)?; + let r = maybe_acc.as_ref().map_or(Ok(H256::new()), |a| { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); a.storage_at(account_db.as_hashdb(), key) }); @@ -450,75 +455,84 @@ impl State { } /// Get accounts' code. - pub fn code(&self, a: &Address) -> Option> { + pub fn code(&self, a: &Address) -> trie::Result>> { self.ensure_cached(a, RequireCache::Code, true, |a| a.as_ref().map_or(None, |a| a.code().clone())) } /// Get an account's code hash. - pub fn code_hash(&self, a: &Address) -> H256 { + pub fn code_hash(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(SHA3_EMPTY, |a| a.code_hash())) } /// Get accounts' code size. - pub fn code_size(&self, a: &Address) -> Option { + pub fn code_size(&self, a: &Address) -> trie::Result> { self.ensure_cached(a, RequireCache::CodeSize, true, |a| a.as_ref().and_then(|a| a.code_size())) } /// Add `incr` to the balance of account `a`. #[cfg_attr(feature="dev", allow(single_match))] - pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) { - trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)); + pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) -> trie::Result<()> { + trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)?); let is_value_transfer = !incr.is_zero(); - if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)) { - self.require(a, false).add_balance(incr); + if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)?) { + self.require(a, false)?.add_balance(incr); } else { match cleanup_mode { - CleanupMode::KillEmpty(set) => if !is_value_transfer && self.exists(a) && !self.exists_and_not_null(a) { + CleanupMode::KillEmpty(set) => if !is_value_transfer && self.exists(a)? && !self.exists_and_not_null(a)? { set.insert(a.clone()); }, _ => {} } } + + Ok(()) } /// Subtract `decr` from the balance of account `a`. - pub fn sub_balance(&mut self, a: &Address, decr: &U256) { - trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)); - if !decr.is_zero() || !self.exists(a) { - self.require(a, false).sub_balance(decr); + pub fn sub_balance(&mut self, a: &Address, decr: &U256) -> trie::Result<()> { + trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)?); + if !decr.is_zero() || !self.exists(a)? 
{ + self.require(a, false)?.sub_balance(decr); } + + Ok(()) } /// Subtracts `by` from the balance of `from` and adds it to that of `to`. - pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, cleanup_mode: CleanupMode) { - self.sub_balance(from, by); - self.add_balance(to, by, cleanup_mode); + pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, cleanup_mode: CleanupMode) -> trie::Result<()> { + self.sub_balance(from, by)?; + self.add_balance(to, by, cleanup_mode)?; + Ok(()) } /// Increment the nonce of account `a` by 1. - pub fn inc_nonce(&mut self, a: &Address) { - self.require(a, false).inc_nonce() + pub fn inc_nonce(&mut self, a: &Address) -> trie::Result<()> { + self.require(a, false).map(|mut x| x.inc_nonce()) } /// Mutate storage of account `a` so that it is `value` for `key`. - pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) { - if self.storage_at(a, &key) != value { - self.require(a, false).set_storage(key, value) + pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> trie::Result<()> { + if self.storage_at(a, &key)? != value { + self.require(a, false)?.set_storage(key, value) } + + Ok(()) } /// Initialise the code of account `a` so that it is `code`. /// NOTE: Account should have been created with `new_contract`. - pub fn init_code(&mut self, a: &Address, code: Bytes) { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).init_code(code); + pub fn init_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.init_code(code); + Ok(()) } /// Reset the code of account `a` so that it is `code`. - pub fn reset_code(&mut self, a: &Address, code: Bytes) { - self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).reset_code(code); + pub fn reset_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.reset_code(code); + Ok(()) } /// Execute a given transaction. @@ -629,25 +643,29 @@ impl State { })) } - fn query_pod(&mut self, query: &PodState) { - for (address, pod_account) in query.get().into_iter() - .filter(|&(a, _)| self.ensure_cached(a, RequireCache::Code, true, |a| a.is_some())) - { + fn query_pod(&mut self, query: &PodState) -> trie::Result<()> { + for (address, pod_account) in query.get() { + if !self.ensure_cached(address, RequireCache::Code, true, |a| a.is_some())? { + continue + } + // needs to be split into two parts for the refcell code here // to work. for key in pod_account.storage.keys() { - self.storage_at(address, key); + self.storage_at(address, key)?; } } + + Ok(()) } /// Returns a `StateDiff` describing the difference from `orig` to `self`. /// Consumes self. - pub fn diff_from(&self, orig: State) -> StateDiff { + pub fn diff_from(&self, orig: State) -> trie::Result { let pod_state_post = self.to_pod(); let mut state_pre = orig; - state_pre.query_pod(&pod_state_post); - pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post) + state_pre.query_pod(&pod_state_post)?; + Ok(pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post)) } // load required account data from the databases. @@ -681,16 +699,16 @@ impl State { /// Check caches for required data /// First searches for account in the local, then the shared cache. /// Populates local cache if nothing found. 
- fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> U + fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> trie::Result where F: Fn(Option<&Account>) -> U { // check local cache first if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { if let Some(ref mut account) = maybe_acc.account { let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); Self::update_account_cache(require, account, &self.db, accountdb.as_hashdb()); - return f(Some(account)); + return Ok(f(Some(account))); } - return f(None); + return Ok(f(None)); } // check global cache let result = self.db.get_cached(a, |mut acc| { @@ -701,37 +719,34 @@ impl State { f(acc.map(|a| &*a)) }); match result { - Some(r) => r, + Some(r) => Ok(r), None => { // first check if it is not in database for sure - if check_null && self.db.is_known_null(a) { return f(None); } + if check_null && self.db.is_known_null(a) { return Ok(f(None)); } // not found in the global cache, get from the DB and insert into local - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let mut maybe_acc = match db.get_with(a, Account::from_rlp) { - Ok(acc) => acc, - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; + let mut maybe_acc = db.get_with(a, Account::from_rlp)?; if let Some(ref mut account) = maybe_acc.as_mut() { let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); Self::update_account_cache(require, account, &self.db, accountdb.as_hashdb()); } let r = f(maybe_acc.as_ref()); self.insert_cache(a, AccountEntry::new_clean(maybe_acc)); - r + Ok(r) } } } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. - fn require<'a>(&'a self, a: &Address, require_code: bool) -> RefMut<'a, Account> { + fn require<'a>(&'a self, a: &Address, require_code: bool) -> trie::Result> { self.require_or_from(a, require_code, || Account::new_basic(U256::from(0u8), self.account_start_nonce), |_|{}) } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. /// If it doesn't exist, make account equal the evaluation of `default`. - fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) - -> RefMut<'a, Account> + fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> trie::Result> + where F: FnOnce() -> Account, G: FnOnce(&mut Account), { let contains_key = self.cache.borrow().contains_key(a); if !contains_key { @@ -739,11 +754,8 @@ impl State { Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)), None => { let maybe_acc = if !self.db.is_known_null(a) { - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - match db.get_with(a, Account::from_rlp) { - Ok(acc) => AccountEntry::new_clean(acc), - Err(e) => panic!("Potential DB corruption encountered: {}", e), - } + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; + AccountEntry::new_clean(db.get_with(a, Account::from_rlp)?) } else { AccountEntry::new_clean(None) }; @@ -754,7 +766,7 @@ impl State { self.note_cache(a); // at this point the entry is guaranteed to be in the cache. 
- RefMut::map(self.cache.borrow_mut(), |c| { + Ok(RefMut::map(self.cache.borrow_mut(), |c| { let mut entry = c.get_mut(a).expect("entry known to exist in the cache; qed"); match &mut entry.account { @@ -775,18 +787,18 @@ impl State { }, _ => panic!("Required account must always exist; qed"), } - }) + })) } } -// LES state proof implementations. +// State proof implementations; useful for light client protocols. impl State { /// Prove an account's existence or nonexistence in the state trie. /// Returns a merkle proof of the account's trie node with all nodes before `from_level` /// omitted or an encountered trie error. /// Requires a secure trie to be used for accurate results. /// `account_key` == sha3(address) - pub fn prove_account(&self, account_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_account(&self, account_key: H256, from_level: u32) -> trie::Result> { let mut recorder = Recorder::with_depth(from_level); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; trie.get_with(&account_key, &mut recorder)?; @@ -799,7 +811,7 @@ impl State { /// `from_level` omitted. Requires a secure trie to be used for correctness. /// `account_key` == sha3(address) /// `storage_key` == sha3(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> trie::Result> { // TODO: probably could look into cache somehow but it's keyed by // address, not sha3(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; @@ -814,7 +826,7 @@ impl State { /// Get code by address hash. /// Only works when backed by a secure trie. - pub fn code_by_address_hash(&self, account_key: H256) -> Result, Box> { + pub fn code_by_address_hash(&self, account_key: H256) -> trie::Result> { let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let mut acc = match trie.get_with(&account_key, Account::from_rlp)? { Some(acc) => acc, diff --git a/ethcore/src/types/executed.rs b/ethcore/src/types/executed.rs index 21858c194..4301044ce 100644 --- a/ethcore/src/types/executed.rs +++ b/ethcore/src/types/executed.rs @@ -16,7 +16,7 @@ //! Transaction execution format module. -use util::{Bytes, U256, Address, U512}; +use util::{Bytes, U256, Address, U512, trie}; use rlp::*; use evm; use trace::{VMTrace, FlatTrace}; @@ -146,27 +146,33 @@ pub enum ExecutionError { got: U512 }, /// Returned when internal evm error occurs. - Internal, + Internal(String), /// Returned when generic transaction occurs TransactionMalformed(String), } +impl From> for ExecutionError { + fn from(err: Box) -> Self { + ExecutionError::Internal(format!("{}", err)) + } +} + impl fmt::Display for ExecutionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ExecutionError::*; let msg = match *self { - NotEnoughBaseGas { required, got } => + NotEnoughBaseGas { ref required, ref got } => format!("Not enough base gas. {} is required, but only {} paid", required, got), - BlockGasLimitReached { gas_limit, gas_used, gas } => + BlockGasLimitReached { ref gas_limit, ref gas_used, ref gas } => format!("Block gas limit reached. 
The limit is {}, {} has \ already been used, and {} more is required", gas_limit, gas_used, gas), - InvalidNonce { expected, got } => + InvalidNonce { ref expected, ref got } => format!("Invalid transaction nonce: expected {}, found {}", expected, got), - NotEnoughCash { required, got } => + NotEnoughCash { ref required, ref got } => format!("Cost of transaction exceeds sender balance. {} is required \ but the sender only has {}", required, got), - Internal => "Internal evm error".into(), + Internal(ref msg) => msg.clone(), TransactionMalformed(ref err) => format!("Malformed transaction: {}", err), }; @@ -184,6 +190,8 @@ pub enum CallError { StatePruned, /// Couldn't find an amount of gas that didn't result in an exception. Exceptional, + /// Corrupt state. + StateCorrupt, /// Error executing. Execution(ExecutionError), } @@ -202,6 +210,7 @@ impl fmt::Display for CallError { TransactionNotFound => "Transaction couldn't be found in the chain".into(), StatePruned => "Couldn't find the transaction block's state in the chain".into(), Exceptional => "An exception happened in the execution".into(), + StateCorrupt => "Stored state found to be corrupted.".into(), Execution(ref e) => format!("{}", e), }; diff --git a/ethcore/src/types/trace_types/error.rs b/ethcore/src/types/trace_types/error.rs index 7eb16570c..ea3d32679 100644 --- a/ethcore/src/types/trace_types/error.rs +++ b/ethcore/src/types/trace_types/error.rs @@ -40,19 +40,25 @@ pub enum Error { Internal, } -impl From for Error { - fn from(e: EvmError) -> Self { - match e { +impl<'a> From<&'a EvmError> for Error { + fn from(e: &'a EvmError) -> Self { + match *e { EvmError::OutOfGas => Error::OutOfGas, EvmError::BadJumpDestination { .. } => Error::BadJumpDestination, EvmError::BadInstruction { .. } => Error::BadInstruction, EvmError::StackUnderflow { .. } => Error::StackUnderflow, EvmError::OutOfStack { .. 
} => Error::OutOfStack, - EvmError::Internal => Error::Internal, + EvmError::Internal(_) => Error::Internal, } } } +impl From for Error { + fn from(e: EvmError) -> Self { + Error::from(&e) + } +} + impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; From 91753c53cd5a72de764b5354eeff4d1c59980d40 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 18:41:01 +0100 Subject: [PATCH 02/89] fix tests, json tests --- ethcore/src/engines/authority_round.rs | 8 +- ethcore/src/engines/tendermint/mod.rs | 8 +- ethcore/src/ethereum/ethash.rs | 30 +++- ethcore/src/ethereum/mod.rs | 12 +- ethcore/src/evm/tests.rs | 31 ++-- ethcore/src/executive.rs | 50 +++--- ethcore/src/externalities.rs | 2 +- ethcore/src/json_tests/executive.rs | 60 ++++--- ethcore/src/miner/miner.rs | 3 +- ethcore/src/miner/mod.rs | 2 +- ethcore/src/spec/spec.rs | 2 +- ethcore/src/state/account.rs | 14 +- ethcore/src/state/mod.rs | 224 ++++++++++++------------- ethcore/src/tests/client.rs | 4 +- 14 files changed, 245 insertions(+), 205 deletions(-) diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 4f99a644e..e4efdaea1 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -250,10 +250,12 @@ impl Engine for AuthorityRound { fn on_close_block(&self, block: &mut ExecutedBlock) { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty); + let res = fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty) + .map_err(::error::Error::from) + .and_then(|_| fields.state.commit()); // Commit state so that we can actually figure out the state root. - if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); + if let Err(e) = res { + warn!("Encountered error on closing block: {}", e); } } diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 47117f83a..9b750a221 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -467,10 +467,12 @@ impl Engine for Tendermint { fn on_close_block(&self, block: &mut ExecutedBlock) { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty); + let res = fields.state.add_balance(fields.header.author(), &self.block_reward, CleanupMode::NoEmpty) + .map_err(::error::Error::from) + .and_then(|_| fields.state.commit()); // Commit state so that we can actually figure out the state root. - if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); + if let Err(e) = res { + warn!("Encountered error on closing block: {}", e); } } diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 3c9196a85..67c956cdc 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -213,13 +213,13 @@ impl Engine for Ethash { if block.fields().header.number() == self.ethash_params.dao_hardfork_transition { // TODO: enable trigger function maybe? 
// if block.fields().header.gas_limit() <= 4_000_000.into() { - let mut state = block.fields_mut().state; + let state = block.fields_mut().state; for child in &self.ethash_params.dao_hardfork_accounts { let beneficiary = &self.ethash_params.dao_hardfork_beneficiary; let res = state.balance(child) .and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty)); - if let Err(e) = res { + if let Err(_) = res { warn!("Unable to apply DAO hardfork due to database corruption."); warn!("Your node is now likely out of consensus."); } @@ -235,12 +235,28 @@ impl Engine for Ethash { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())), CleanupMode::NoEmpty); + let res = fields.state.add_balance( + fields.header.author(), + &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())), + CleanupMode::NoEmpty + ); + + if let Err(e) = res { + warn!("Failed to give block reward: {}", e); + } // Bestow uncle rewards let current_number = fields.header.number(); for u in fields.uncles.iter() { - fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)), CleanupMode::NoEmpty); + let res = fields.state.add_balance( + u.author(), + &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)), + CleanupMode::NoEmpty + ); + + if let Err(e) = res { + warn!("Failed to give uncle reward: {}", e); + } } // Commit state so that we can actually figure out the state root. @@ -473,7 +489,7 @@ mod tests { let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close(); - assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); + assert_eq!(b.state().balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); } #[test] @@ -491,8 +507,8 @@ mod tests { b.push_uncle(uncle).unwrap(); let b = b.close(); - assert_eq!(b.state().balance(&Address::zero()), "478eae0e571ba000".into()); - assert_eq!(b.state().balance(&uncle_author), "3cb71f51fc558000".into()); + assert_eq!(b.state().balance(&Address::zero()).unwrap(), "478eae0e571ba000".into()); + assert_eq!(b.state().balance(&uncle_author).unwrap(), "3cb71f51fc558000".into()); } #[test] diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index c8eb44911..b15c9e4de 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -91,12 +91,12 @@ mod tests { let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap(); let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); - assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()), 1u64.into()); - assert_eq!(s.balance(&"0000000000000000000000000000000000000004".into()), 1u64.into()); - assert_eq!(s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()), U256::from(1u64) << 200); - assert_eq!(s.balance(&"0000000000000000000000000000000000000000".into()), 0u64.into()); + 
assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"0000000000000000000000000000000000000004".into()).unwrap(), 1u64.into()); + assert_eq!(s.balance(&"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c".into()).unwrap(), U256::from(1u64) << 200); + assert_eq!(s.balance(&"0000000000000000000000000000000000000000".into()).unwrap(), 0u64.into()); } #[test] diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index ccf711a40..3002c170c 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -82,28 +82,29 @@ impl Default for Schedule { } impl Ext for FakeExt { - fn storage_at(&self, key: &H256) -> H256 { - self.store.get(key).unwrap_or(&H256::new()).clone() + fn storage_at(&self, key: &H256) -> trie::Result { + Ok(self.store.get(key).unwrap_or(&H256::new()).clone()) } - fn set_storage(&mut self, key: H256, value: H256) { + fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> { self.store.insert(key, value); + Ok(()) } - fn exists(&self, address: &Address) -> bool { - self.balances.contains_key(address) + fn exists(&self, address: &Address) -> trie::Result { + Ok(self.balances.contains_key(address)) } - fn exists_and_not_null(&self, address: &Address) -> bool { - self.balances.get(address).map_or(false, |b| !b.is_zero()) + fn exists_and_not_null(&self, address: &Address) -> trie::Result { + Ok(self.balances.get(address).map_or(false, |b| !b.is_zero())) } - fn origin_balance(&self) -> U256 { + fn origin_balance(&self) -> trie::Result { unimplemented!() } - fn balance(&self, address: &Address) -> U256 { - self.balances[address] + fn balance(&self, address: &Address) -> trie::Result { + Ok(self.balances[address]) } fn blockhash(&self, number: &U256) -> H256 { @@ -146,12 +147,12 @@ impl Ext for FakeExt { MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Arc { - self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone() + fn extcode(&self, address: &Address) -> trie::Result> { + Ok(self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone()) } - fn extcodesize(&self, address: &Address) -> usize { - self.codes.get(address).map_or(0, |c| c.len()) + fn extcodesize(&self, address: &Address) -> trie::Result { + Ok(self.codes.get(address).map_or(0, |c| c.len())) } fn log(&mut self, topics: Vec, data: &[u8]) { @@ -165,7 +166,7 @@ impl Ext for FakeExt { unimplemented!(); } - fn suicide(&mut self, _refund_address: &Address) { + fn suicide(&mut self, _refund_address: &Address) -> trie::Result<()> { unimplemented!(); } diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index d287857a0..d9f1b7413 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -544,7 +544,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(0x7)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -555,9 +555,9 @@ mod tests { }; assert_eq!(gas_left, U256::from(79_975)); - assert_eq!(state.storage_at(&address, &H256::new()), H256::from(&U256::from(0xf9u64))); - 
assert_eq!(state.balance(&sender), U256::from(0xf9)); - assert_eq!(state.balance(&address), U256::from(0x7)); + assert_eq!(state.storage_at(&address, &H256::new()).unwrap(), H256::from(&U256::from(0xf9u64))); + assert_eq!(state.balance(&sender).unwrap(), U256::from(0xf9)); + assert_eq!(state.balance(&address).unwrap(), U256::from(0x7)); // 0 cause contract hasn't returned assert_eq!(substate.contracts_created.len(), 0); @@ -603,7 +603,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -662,7 +662,7 @@ mod tests { params.call_type = CallType::Call; let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(5); let mut substate = Substate::new(); @@ -773,7 +773,7 @@ mod tests { params.value = ActionValue::Transfer(100.into()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(5); let mut substate = Substate::new(); @@ -861,7 +861,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -913,7 +913,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(1024); let mut substate = Substate::new(); @@ -971,9 +971,9 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.init_code(&address_a, code_a.clone()); - state.init_code(&address_b, code_b.clone()); - state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty); + state.init_code(&address_a, code_a.clone()).unwrap(); + state.init_code(&address_b, code_b.clone()).unwrap(); + state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); @@ -985,7 +985,7 @@ mod tests { }; assert_eq!(gas_left, U256::from(73_237)); - assert_eq!(state.storage_at(&address_a, &H256::from(&U256::from(0x23))), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address_a, &H256::from(&U256::from(0x23))).unwrap(), H256::from(&U256::from(1))); } // test is incorrect, mk @@ -1019,7 +1019,7 @@ mod tests { params.code = Some(Arc::new(code.clone())); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - 
state.init_code(&address, code); + state.init_code(&address, code).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -1030,8 +1030,8 @@ mod tests { }; assert_eq!(gas_left, U256::from(59_870)); - assert_eq!(state.storage_at(&address, &H256::from(&U256::zero())), H256::from(&U256::from(1))); - assert_eq!(state.storage_at(&address, &H256::from(&U256::one())), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(1))); + assert_eq!(state.storage_at(&address, &H256::from(&U256::one())).unwrap(), H256::from(&U256::from(1))); } // test is incorrect, mk @@ -1052,7 +1052,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_limit = U256::from(100_000); let engine = TestEngine::new(0); @@ -1069,10 +1069,10 @@ mod tests { assert_eq!(executed.cumulative_gas_used, U256::from(41_301)); assert_eq!(executed.logs.len(), 0); assert_eq!(executed.contracts_created.len(), 0); - assert_eq!(state.balance(&sender), U256::from(1)); - assert_eq!(state.balance(&contract), U256::from(17)); - assert_eq!(state.nonce(&sender), U256::from(1)); - assert_eq!(state.storage_at(&contract, &H256::new()), H256::from(&U256::from(1))); + assert_eq!(state.balance(&sender).unwrap(), U256::from(1)); + assert_eq!(state.balance(&contract).unwrap(), U256::from(17)); + assert_eq!(state.nonce(&sender).unwrap(), U256::from(1)); + assert_eq!(state.storage_at(&contract, &H256::new()).unwrap(), H256::from(&U256::from(1))); } evm_test!{test_transact_invalid_nonce: test_transact_invalid_nonce_jit, test_transact_invalid_nonce_int} @@ -1090,7 +1090,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_limit = U256::from(100_000); let engine = TestEngine::new(0); @@ -1123,7 +1123,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_used = U256::from(20_000); info.gas_limit = U256::from(100_000); @@ -1158,7 +1158,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap(); let mut info = EnvInfo::default(); info.gas_limit = U256::from(100_000); let engine = TestEngine::new(0); @@ -1193,7 +1193,7 @@ mod tests { params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty); + state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap(); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); diff --git 
a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs
index db4a587d1..893ba03be 100644
--- a/ethcore/src/externalities.rs
+++ b/ethcore/src/externalities.rs
@@ -506,7 +506,7 @@ mod tests {
 {
 let vm_factory = Default::default();
 let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer);
- ext.suicide(refund_account);
+ ext.suicide(refund_account).unwrap();
 }

 assert_eq!(setup.sub_state.suicides.len(), 1);
diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs
index f2e73ba97..844fa08f5 100644
--- a/ethcore/src/json_tests/executive.rs
+++ b/ethcore/src/json_tests/executive.rs
@@ -74,39 +74,39 @@ impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B>
 address: Address,
 tracer: &'a mut T,
 vm_tracer: &'a mut V,
- ) -> Self {
- TestExt {
- contract_address: contract_address(&address, &state.nonce(&address)),
+ ) -> trie::Result<Self> {
+ Ok(TestExt {
+ contract_address: contract_address(&address, &state.nonce(&address)?),
 ext: Externalities::new(state, info, engine, vm_factory, depth, origin_info, substate, output, tracer, vm_tracer),
 callcreates: vec![]
- }
+ })
 }
 }

 impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B>
 where T: Tracer, V: VMTracer, B: StateBackend
 {
- fn storage_at(&self, key: &H256) -> H256 {
+ fn storage_at(&self, key: &H256) -> trie::Result<H256> {
 self.ext.storage_at(key)
 }

- fn set_storage(&mut self, key: H256, value: H256) {
+ fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> {
 self.ext.set_storage(key, value)
 }

- fn exists(&self, address: &Address) -> bool {
+ fn exists(&self, address: &Address) -> trie::Result<bool> {
 self.ext.exists(address)
 }

- fn exists_and_not_null(&self, address: &Address) -> bool {
+ fn exists_and_not_null(&self, address: &Address) -> trie::Result<bool> {
 self.ext.exists_and_not_null(address)
 }

- fn balance(&self, address: &Address) -> U256 {
+ fn balance(&self, address: &Address) -> trie::Result<U256> {
 self.ext.balance(address)
 }

- fn origin_balance(&self) -> U256 {
+ fn origin_balance(&self) -> trie::Result<U256> {
 self.ext.origin_balance()
 }

@@ -143,11 +143,11 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B>
 MessageCallResult::Success(*gas)
 }

- fn extcode(&self, address: &Address) -> Arc<Bytes> {
+ fn extcode(&self, address: &Address) -> trie::Result<Arc<Bytes>> {
 self.ext.extcode(address)
 }

- fn extcodesize(&self, address: &Address) -> usize {
+ fn extcodesize(&self, address: &Address) -> trie::Result<usize> {
 self.ext.extcodesize(address)
 }

@@ -159,7 +159,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B>
 self.ext.ret(gas, data)
 }

- fn suicide(&mut self, refund_address: &Address) {
+ fn suicide(&mut self, refund_address: &Address) -> trie::Result<()> {
 self.ext.suicide(refund_address)
 }

@@ -201,6 +201,19 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
 fail = true
 };

+ macro_rules!
try_fail { + ($e: expr) => { + match $e { + Ok(x) => x, + Err(e) => { + let msg = format!("Internal error: {}", e); + fail_unless(false, &msg); + continue + } + } + } + } + let out_of_gas = vm.out_of_gas(); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -217,7 +230,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { // execute let (res, callcreates) = { - let mut ex = TestExt::new( + let mut ex = try_fail!(TestExt::new( &mut state, &info, &engine, @@ -229,7 +242,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { params.address.clone(), &mut tracer, &mut vm_tracer, - ); + )); let mut evm = vm_factory.create(params.gas); let res = evm.exec(params, &mut ex); // a return in finalize will not alter callcreates @@ -248,14 +261,19 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { for (address, account) in vm.post_state.unwrap().into_iter() { let address = address.into(); let code: Vec = account.code.into(); - fail_unless(state.code(&address).as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); - fail_unless(state.balance(&address) == account.balance.into(), "balance is incorrect"); - fail_unless(state.nonce(&address) == account.nonce.into(), "nonce is incorrect"); - account.storage.into_iter().foreach(|(k, v)| { + let found_code = try_fail!(state.code(&address)); + let found_balance = try_fail!(state.balance(&address)); + let found_nonce = try_fail!(state.nonce(&address)); + + fail_unless(found_code.as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); + fail_unless(found_balance == account.balance.into(), "balance is incorrect"); + fail_unless(found_nonce == account.nonce.into(), "nonce is incorrect"); + for (k, v) in account.storage { let key: U256 = k.into(); let value: U256 = v.into(); - fail_unless(state.storage_at(&address, &From::from(key)) == From::from(value), "storage is incorrect"); - }); + let found_storage = try_fail!(state.storage_at(&address, &From::from(key))); + fail_unless(found_storage == From::from(value), "storage is incorrect"); + } } let calls: Option> = vm.calls.map(|c| c.into_iter().map(From::from).collect()); diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 432c58025..99c22f88e 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -715,7 +715,8 @@ impl MinerService for Miner { let needed_balance = t.value + t.gas * t.gas_price; if balance < needed_balance { // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty); + state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty) + .map_err(ExecutionError::from)?; } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; let mut ret = Executive::new(&mut state, &env_info, &*self.engine, client.vm_factory()).transact(t, options)?; diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index e7dc52055..74e1cb598 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -62,7 +62,7 @@ pub use self::work_notify::NotifyWork; pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOptions}; use std::collections::BTreeMap; -use util::{H256, U256, Address, Bytes, trie}; +use util::{H256, U256, Address, Bytes}; use client::{MiningBlockChainClient, Executed, CallAnalytics}; use block::ClosedBlock; use header::BlockNumber; diff --git 
a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 85996d24b..67e21208b 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -389,6 +389,6 @@ mod tests { let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap(); let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(), Default::default()).unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()), expected); + assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()).unwrap(), expected); } } diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index acd1591f7..ebdf36d89 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -483,7 +483,7 @@ mod tests { let rlp = { let mut a = Account::new_contract(69.into(), 0.into()); a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.init_code(vec![]); a.commit_code(&mut db); a.rlp() @@ -491,8 +491,8 @@ mod tests { let a = Account::from_rlp(&rlp); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); - assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); - assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))), H256::new()); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))).unwrap(), H256::from(&U256::from(0x1234u64))); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))).unwrap(), H256::new()); } #[test] @@ -521,7 +521,7 @@ mod tests { let mut db = AccountDBMut::new(&mut db, &Address::new()); a.set_storage(0.into(), 0x1234.into()); assert_eq!(a.storage_root(), None); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); } @@ -531,11 +531,11 @@ mod tests { let mut db = MemoryDB::new(); let mut db = AccountDBMut::new(&mut db, &Address::new()); a.set_storage(0.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.set_storage(1.into(), 0x1234.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); a.set_storage(1.into(), 0.into()); - a.commit_storage(&Default::default(), &mut db); + a.commit_storage(&Default::default(), &mut db).unwrap(); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index b88a6a9f1..e25d7d404 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -573,7 +573,7 @@ impl State { let addr_hash = account.address_hash(address); { let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash); - account.commit_storage(&factories.trie, account_db.as_hashdb_mut()); + account.commit_storage(&factories.trie, account_db.as_hashdb_mut())?; account.commit_code(account_db.as_hashdb_mut()); } if !account.is_empty() { @@ -911,7 +911,7 @@ 
mod tests { data: FromHex::from_hex("601080600c6000396000f3006000355415600957005b60203560003555").unwrap(), }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -941,13 +941,13 @@ mod tests { let temp = RandomTempPath::new(); let mut state = { let mut state = get_temp_state_in(temp.as_path()); - assert_eq!(state.exists(&a), false); - state.inc_nonce(&a); + assert_eq!(state.exists(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); state.clone() }; - state.inc_nonce(&a); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); } @@ -971,7 +971,7 @@ mod tests { data: FromHex::from_hex("5b600056").unwrap(), }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1008,8 +1008,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1051,7 +1051,7 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1135,7 +1135,7 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1178,8 +1178,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1240,8 +1240,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); + state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1299,8 +1299,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - 
state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1339,9 +1339,9 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1399,8 +1399,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1454,8 +1454,8 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds. - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()).unwrap(); // not enough funds. 
+ state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1497,9 +1497,9 @@ mod tests { data: vec![],//600480600b6000396000f35b600056 }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1553,10 +1553,10 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1628,10 +1628,10 @@ mod tests { data: vec![],//600480600b6000396000f35b600056 }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); - state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()); - state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); - state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); + state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()).unwrap(); + state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); + state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { @@ -1701,9 +1701,9 @@ mod tests { data: vec![], }.sign(&secret(), None); - state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()); - state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty); - state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty); + state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()).unwrap(); + state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty).unwrap(); + state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty).unwrap(); let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1740,16 +1740,16 
@@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}); - state.init_code(&a, vec![1, 2, 3]); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}).unwrap(); + state.init_code(&a, vec![1, 2, 3]).unwrap(); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); state.commit().unwrap(); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); + assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); } #[test] @@ -1758,13 +1758,13 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))); + state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))).unwrap(); state.commit().unwrap(); state.drop() }; let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))), H256::from(&U256::from(69u64))); + assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))).unwrap(), H256::from(&U256::from(69u64))); } #[test] @@ -1773,16 +1773,16 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.inc_nonce(&a); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); + state.inc_nonce(&a).unwrap(); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); } #[test] @@ -1790,16 +1790,16 @@ mod tests { let a = Address::zero(); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.exists_and_not_null(&a), false); - state.inc_nonce(&a); - assert_eq!(state.exists(&a), true); - assert_eq!(state.exists_and_not_null(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.exists_and_not_null(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.kill_account(&a); - assert_eq!(state.exists(&a), false); - assert_eq!(state.exists_and_not_null(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.exists_and_not_null(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1809,13 +1809,13 @@ mod tests { let db = 
get_temp_state_db_in(path.as_path()); let (root, db) = { let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty); // create an empty account + state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty).unwrap(); // create an empty account state.commit().unwrap(); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(!state.exists(&a)); - assert!(!state.exists_and_not_null(&a)); + assert!(!state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); } #[test] @@ -1825,13 +1825,13 @@ mod tests { let db = get_temp_state_db_in(path.as_path()); let (root, db) = { let mut state = State::new(db, U256::from(0), Default::default()); - state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate); // create an empty account + state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account state.commit().unwrap(); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert!(state.exists(&a)); - assert!(!state.exists_and_not_null(&a)); + assert!(state.exists(&a).unwrap()); + assert!(!state.exists_and_not_null(&a).unwrap()); } #[test] @@ -1840,27 +1840,27 @@ mod tests { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.inc_nonce(&a); + state.inc_nonce(&a).unwrap(); state.commit().unwrap(); - assert_eq!(state.exists(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.drop() }; let (root, db) = { let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a), true); - assert_eq!(state.nonce(&a), U256::from(1u64)); + assert_eq!(state.exists(&a).unwrap(), true); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); state.kill_account(&a); state.commit().unwrap(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.exists(&a), false); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.exists(&a).unwrap(), false); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1869,20 +1869,20 @@ mod tests { let mut state = state_result.reference_mut(); let a = Address::zero(); let b = 1u64.into(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(69u64)); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(69u64)); - state.sub_balance(&a, &U256::from(42u64)); - assert_eq!(state.balance(&a), U256::from(27u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); + state.sub_balance(&a, &U256::from(42u64)).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(27u64)); - state.transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(9u64)); - assert_eq!(state.balance(&b), 
U256::from(18u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(27u64)); + state.transfer_balance(&a, &b, &U256::from(18u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(9u64)); - assert_eq!(state.balance(&b), U256::from(18u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(9u64)); + assert_eq!(state.balance(&b).unwrap(), U256::from(18u64)); } #[test] @@ -1890,16 +1890,16 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(1u64)); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(2u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); state.commit().unwrap(); - assert_eq!(state.nonce(&a), U256::from(2u64)); - state.inc_nonce(&a); - assert_eq!(state.nonce(&a), U256::from(3u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(2u64)); + state.inc_nonce(&a).unwrap(); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); state.commit().unwrap(); - assert_eq!(state.nonce(&a), U256::from(3u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(3u64)); } #[test] @@ -1907,11 +1907,11 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - assert_eq!(state.balance(&a), U256::from(0u64)); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); state.commit().unwrap(); - assert_eq!(state.balance(&a), U256::from(0u64)); - assert_eq!(state.nonce(&a), U256::from(0u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); + assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); } #[test] @@ -1919,7 +1919,7 @@ mod tests { let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); let a = Address::zero(); - state.require(&a, false); + state.require(&a, false).unwrap(); state.commit().unwrap(); assert_eq!(state.root().hex(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785"); } @@ -1930,15 +1930,15 @@ mod tests { let mut state = state_result.reference_mut(); let a = Address::zero(); state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(69u64)); + state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.discard_checkpoint(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); state.checkpoint(); - state.add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty); - assert_eq!(state.balance(&a), U256::from(70u64)); + state.add_balance(&a, &U256::from(1u64), CleanupMode::NoEmpty).unwrap(); + assert_eq!(state.balance(&a).unwrap(), U256::from(70u64)); state.revert_to_checkpoint(); - assert_eq!(state.balance(&a), U256::from(69u64)); + assert_eq!(state.balance(&a).unwrap(), U256::from(69u64)); } #[test] @@ -1948,12 +1948,12 @@ mod tests { let a = Address::zero(); state.checkpoint(); state.checkpoint(); - state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty); - 
assert_eq!(state.balance(&a), U256::from(69u64));
+ state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap();
+ assert_eq!(state.balance(&a).unwrap(), U256::from(69u64));
 state.discard_checkpoint();
- assert_eq!(state.balance(&a), U256::from(69u64));
+ assert_eq!(state.balance(&a).unwrap(), U256::from(69u64));
 state.revert_to_checkpoint();
- assert_eq!(state.balance(&a), U256::from(0));
+ assert_eq!(state.balance(&a).unwrap(), U256::from(0));
 }

 #[test]
@@ -1970,14 +1970,14 @@ mod tests {
 let mut state = state.reference().clone();
 let a: Address = 0xa.into();
- state.init_code(&a, b"abcdefg".to_vec());
- state.add_balance(&a, &256.into(), CleanupMode::NoEmpty);
- state.set_storage(&a, 0xb.into(), 0xc.into());
+ state.init_code(&a, b"abcdefg".to_vec()).unwrap();
+ state.add_balance(&a, &256.into(), CleanupMode::NoEmpty).unwrap();
+ state.set_storage(&a, 0xb.into(), 0xc.into()).unwrap();

 let mut new_state = state.clone();
- new_state.set_storage(&a, 0xb.into(), 0xd.into());
+ new_state.set_storage(&a, 0xb.into(), 0xd.into()).unwrap();

- new_state.diff_from(state);
+ new_state.diff_from(state).unwrap();
 }
 }
diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs
index d37551231..809604b13 100644
--- a/ethcore/src/tests/client.rs
+++ b/ethcore/src/tests/client.rs
@@ -292,7 +292,7 @@ fn change_history_size() {
 for _ in 0..20 {
 let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]);
- b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty);
+ b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap();
 b.block_mut().fields_mut().state.commit().unwrap();
 let b = b.close_and_lock().seal(&*test_spec.engine, vec![]).unwrap();
 client.import_sealed_block(b).unwrap(); // account change is in the journal overlay
@@ -307,7 +307,7 @@ fn change_history_size() {
 Arc::new(Miner::with_spec(&test_spec)),
 IoChannel::disconnected(),
 ).unwrap();
- assert_eq!(client.state().balance(&address), 100.into());
+ assert_eq!(client.state().balance(&address).unwrap(), 100.into());
 }

 #[test]

From f169c8dbb05789abafb252c6fb71a6aad34c12f1 Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Thu, 23 Feb 2017 19:17:05 +0100
Subject: [PATCH 03/89] fix remainder of build

---
 evmbin/src/ext.rs | 23 ++++++++--------
 rpc/src/v1/helpers/errors.rs | 5 ++++
 rpc/src/v1/impls/eth.rs | 32 ++++++++++++++++++++---
 rpc/src/v1/tests/eth.rs | 1 -
 rpc/src/v1/tests/helpers/miner_service.rs | 29 ++++++++++++++------
 5 files changed, 66 insertions(+), 24 deletions(-)

diff --git a/evmbin/src/ext.rs b/evmbin/src/ext.rs
index 6492f4fdc..bcce9adc1 100644
--- a/evmbin/src/ext.rs
+++ b/evmbin/src/ext.rs
@@ -18,7 +18,7 @@
 use std::sync::Arc;
 use std::collections::HashMap;

-use util::{U256, H256, Address, Bytes, FixedHash};
+use util::{U256, H256, Address, Bytes, FixedHash, trie};
 use ethcore::client::EnvInfo;
 use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule, CallType};
@@ -39,27 +39,28 @@ impl Default for FakeExt {
 }

 impl Ext for FakeExt {
- fn storage_at(&self, key: &H256) -> H256 {
- self.store.get(key).unwrap_or(&H256::new()).clone()
+ fn storage_at(&self, key: &H256) -> trie::Result<H256> {
+ Ok(self.store.get(key).unwrap_or(&H256::new()).clone())
 }

- fn set_storage(&mut self, key: H256, value: H256) {
+ fn set_storage(&mut self, key: H256, value: H256) -> trie::Result<()> {
 self.store.insert(key, value);
+ Ok(())
 }

- fn exists(&self, _address: &Address) -> bool {
+ fn
exists(&self, _address: &Address) -> trie::Result<bool> {
 unimplemented!();
 }

- fn exists_and_not_null(&self, _address: &Address) -> bool {
+ fn exists_and_not_null(&self, _address: &Address) -> trie::Result<bool> {
 unimplemented!();
 }

- fn origin_balance(&self) -> U256 {
+ fn origin_balance(&self) -> trie::Result<U256> {
 unimplemented!();
 }

- fn balance(&self, _address: &Address) -> U256 {
+ fn balance(&self, _address: &Address) -> trie::Result<U256> {
 unimplemented!();
 }

@@ -83,11 +84,11 @@ impl Ext for FakeExt {
 unimplemented!();
 }

- fn extcode(&self, _address: &Address) -> Arc<Bytes> {
+ fn extcode(&self, _address: &Address) -> trie::Result<Arc<Bytes>> {
 unimplemented!();
 }

- fn extcodesize(&self, _address: &Address) -> usize {
+ fn extcodesize(&self, _address: &Address) -> trie::Result<usize> {
 unimplemented!();
 }

@@ -99,7 +100,7 @@ impl Ext for FakeExt {
 Ok(*gas)
 }

- fn suicide(&mut self, _refund_address: &Address) {
+ fn suicide(&mut self, _refund_address: &Address) -> trie::Result<()> {
 unimplemented!();
 }

diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs
index 93d23b1aa..7b0891246 100644
--- a/rpc/src/v1/helpers/errors.rs
+++ b/rpc/src/v1/helpers/errors.rs
@@ -124,6 +124,10 @@ pub fn state_pruned() -> Error {
 }
 }

+pub fn state_corrupt() -> Error {
+ internal("State corrupt", "")
+}
+
 pub fn exceptional() -> Error {
 Error {
 code: ErrorCode::ServerError(codes::EXCEPTION_ERROR),
@@ -288,6 +292,7 @@ pub fn from_rlp_error(error: DecoderError) -> Error {
 pub fn from_call_error(error: CallError) -> Error {
 match error {
 CallError::StatePruned => state_pruned(),
+ CallError::StateCorrupt => state_corrupt(),
 CallError::Exceptional => exceptional(),
 CallError::Execution(e) => execution(e),
 CallError::TransactionNotFound => internal("{}, this should not be the case with eth_call, most likely a bug.", CallError::TransactionNotFound),
diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs
index 01627ba28..f47ab2055 100644
--- a/rpc/src/v1/impls/eth.rs
+++ b/rpc/src/v1/impls/eth.rs
@@ -349,7 +349,13 @@ impl Eth for EthClient where
 let address = address.into();

 let res = match num.0.clone() {
- BlockNumber::Pending => Ok(take_weakf!(self.miner).balance(&*take_weakf!(self.client), &address).into()),
+ BlockNumber::Pending => {
+ let client = take_weakf!(self.client);
+ match take_weakf!(self.miner).balance(&*client, &address) {
+ Some(balance) => Ok(balance.into()),
+ None => Err(errors::internal("Unable to load balance from database", ""))
+ }
+ }
 id => {
 let client = take_weakf!(self.client);
@@ -369,7 +375,13 @@ impl Eth for EthClient where
 let position: U256 = RpcU256::into(pos);

 let res = match num.0.clone() {
- BlockNumber::Pending => Ok(take_weakf!(self.miner).storage_at(&*take_weakf!(self.client), &address, &H256::from(position)).into()),
+ BlockNumber::Pending => {
+ let client = take_weakf!(self.client);
+ match take_weakf!(self.miner).storage_at(&*client, &address, &H256::from(position)) {
+ Some(s) => Ok(s.into()),
+ None => Err(errors::internal("Unable to load storage from database", ""))
+ }
+ }
 id => {
 let client = take_weakf!(self.client);
@@ -387,7 +399,13 @@ impl Eth for EthClient where
 fn transaction_count(&self, address: RpcH160, num: Trailing) -> BoxFuture {
 let address: Address = RpcH160::into(address);
 let res = match num.0.clone() {
- BlockNumber::Pending => Ok(take_weakf!(self.miner).nonce(&*take_weakf!(self.client), &address).into()),
+ BlockNumber::Pending => {
+ let client = take_weakf!(self.client);
+ match take_weakf!(self.miner).nonce(&*client, &address) {
+ Some(nonce) =>
Ok(nonce.into()), + None => Err(errors::internal("Unable to load nonce from database", "")) + } + } id => { let client = take_weakf!(self.client); @@ -437,7 +455,13 @@ impl Eth for EthClient where let address: Address = RpcH160::into(address); let res = match num.0.clone() { - BlockNumber::Pending => Ok(take_weakf!(self.miner).code(&*take_weakf!(self.client), &address).map_or_else(Bytes::default, Bytes::new)), + BlockNumber::Pending => { + let client = take_weakf!(self.client); + match take_weakf!(self.miner).code(&*client, &address) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::internal("Unable to load code from database", "")) + } + } id => { let client = take_weakf!(self.client); diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 6b937d733..c505a2d5d 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -18,7 +18,6 @@ use std::sync::Arc; use std::time::Duration; -use devtools::RandomTempPath; use ethcore::client::{BlockChainClient, Client, ClientConfig}; use ethcore::ids::BlockId; use ethcore::spec::{Genesis, Spec}; diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 75ca928b4..01dd9edc7 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -254,26 +254,39 @@ impl MinerService for TestMinerService { unimplemented!(); } - fn balance(&self, _chain: &MiningBlockChainClient, address: &Address) -> U256 { - self.latest_closed_block.lock().as_ref().map_or_else(U256::zero, |b| b.block().fields().state.balance(address).clone()) + fn balance(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { + self.latest_closed_block.lock() + .as_ref() + .map(|b| b.block().fields().state.balance(address)) + .map(|b| b.ok()) + .unwrap_or(Some(U256::default())) } fn call(&self, _chain: &MiningBlockChainClient, _t: &SignedTransaction, _analytics: CallAnalytics) -> Result { unimplemented!(); } - fn storage_at(&self, _chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { - self.latest_closed_block.lock().as_ref().map_or_else(H256::default, |b| b.block().fields().state.storage_at(address, position).clone()) + fn storage_at(&self, _chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option { + self.latest_closed_block.lock() + .as_ref() + .map(|b| b.block().fields().state.storage_at(address, position)) + .map(|s| s.ok()) + .unwrap_or(Some(H256::default())) } - fn nonce(&self, _chain: &MiningBlockChainClient, address: &Address) -> U256 { + fn nonce(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { // we assume all transactions are in a pending block, ignoring the // reality of gas limits. 
- self.last_nonce(address).unwrap_or(U256::zero()) + Some(self.last_nonce(address).unwrap_or(U256::zero())) } - fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).map(|c| (*c).clone())) + fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option> { + self.latest_closed_block.lock() + .as_ref() + .map(|b| b.block().fields().state.code(address)) + .map(|c| c.ok()) + .unwrap_or(None) + .map(|c| c.map(|c| (&*c).clone())) } fn sensible_gas_price(&self) -> U256 { From ddbdfafc0525c7c464038c3baba3d29778dd34ed Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Feb 2017 23:10:29 +0100 Subject: [PATCH 04/89] buffer flow -> request credits --- ethcore/light/src/net/error.rs | 8 +- ethcore/light/src/net/mod.rs | 74 +++++++++---------- .../{buffer_flow.rs => request_credits.rs} | 74 +++++++++---------- ethcore/light/src/net/status.rs | 8 +- ethcore/light/src/net/tests/mod.rs | 28 +++---- sync/src/light_sync/tests/test_net.rs | 2 +- 6 files changed, 97 insertions(+), 97 deletions(-) rename ethcore/light/src/net/{buffer_flow.rs => request_credits.rs} (82%) diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index 627a7ef0f..dda78e0b6 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -44,8 +44,8 @@ pub enum Error { Rlp(DecoderError), /// A network error. Network(NetworkError), - /// Out of buffer. - BufferEmpty, + /// Out of credits. + NoCredits, /// Unrecognized packet code. UnrecognizedPacket(u8), /// Unexpected handshake. @@ -72,7 +72,7 @@ impl Error { match *self { Error::Rlp(_) => Punishment::Disable, Error::Network(_) => Punishment::None, - Error::BufferEmpty => Punishment::Disable, + Error::NoCredits => Punishment::Disable, Error::UnrecognizedPacket(_) => Punishment::Disconnect, Error::UnexpectedHandshake => Punishment::Disconnect, Error::WrongNetwork => Punishment::Disable, @@ -103,7 +103,7 @@ impl fmt::Display for Error { match *self { Error::Rlp(ref err) => err.fmt(f), Error::Network(ref err) => err.fmt(f), - Error::BufferEmpty => write!(f, "Out of buffer"), + Error::NoCredits => write!(f, "Out of request credits"), Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), Error::WrongNetwork => write!(f, "Wrong network"), diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 898934965..2ffaffa64 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -37,7 +37,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; use request::{self, HashOrNumber, Request}; -use self::buffer_flow::{Buffer, FlowParams}; +use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; use self::error::Punishment; use self::request_set::RequestSet; @@ -51,7 +51,7 @@ mod request_set; #[cfg(test)] mod tests; -pub mod buffer_flow; +pub mod request_credits; pub use self::error::Error; pub use self::context::{BasicContext, EventContext, IoContext}; @@ -143,10 +143,10 @@ struct PendingPeer { /// Relevant data to each peer. Not accessible publicly, only `pub` due to /// limitations of the privacy system. 
pub struct Peer { - local_buffer: Buffer, // their buffer relative to us + local_credits: Credits, // their credits relative to us status: Status, capabilities: Capabilities, - remote_flow: Option<(Buffer, FlowParams)>, + remote_flow: Option<(Credits, FlowParams)>, sent_head: H256, // last chain head we've given them. last_update: SteadyTime, pending_requests: RequestSet, @@ -155,21 +155,21 @@ pub struct Peer { impl Peer { // check the maximum cost of a request, returning an error if there's - // not enough buffer left. + // not enough credits left. // returns the calculated maximum cost. fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result { - flow_params.recharge(&mut self.local_buffer); + flow_params.recharge(&mut self.local_credits); let max_cost = flow_params.compute_cost(kind, max); - self.local_buffer.deduct_cost(max_cost)?; + self.local_credits.deduct_cost(max_cost)?; Ok(max_cost) } - // refund buffer for a request. returns new buffer amount. + // refund credits for a request. returns new amount of credits. fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { - flow_params.refund(&mut self.local_buffer, amount); + flow_params.refund(&mut self.local_credits, amount); - self.local_buffer.current() + self.local_credits.current() } } @@ -218,7 +218,7 @@ pub trait Handler: Send + Sync { pub struct Params { /// Network id. pub network_id: u64, - /// Buffer flow parameters. + /// Request credits parameters. pub flow_params: FlowParams, /// Initial capabilities. pub capabilities: Capabilities, @@ -324,14 +324,14 @@ impl LightProtocol { /// Check the maximum amount of requests of a specific type /// which a peer would be able to serve. Returns zero if the - /// peer is unknown or has no buffer flow parameters. + /// peer is unknown or has no credit parameters. fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { self.peers.read().get(&peer).and_then(|peer| { let mut peer = peer.lock(); match peer.remote_flow { - Some((ref mut buf, ref flow)) => { - flow.recharge(buf); - Some(flow.max_amount(&*buf, kind)) + Some((ref mut c, ref flow)) => { + flow.recharge(c); + Some(flow.max_amount(&*c, kind)) } None => None, } @@ -341,7 +341,7 @@ impl LightProtocol { /// Make a request to a peer. /// /// Fails on: nonexistent peer, network error, peer not server, - /// insufficient buffer. Does not check capabilities before sending. + /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. 
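	// Concretely, for a request kind priced at (base, per_item): `deduct_max` charges
	// base + per_item * N as soon as a peer asks for up to N items, and once the
	// response turns out to hold only M items, `refund` hands back per_item * (N - M)
	// credits, capped at the advertised limit. `request_from` below applies the same
	// worst-case charge to the local mirror of the remote peer's credits before the
	// request packet is sent.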
pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { @@ -350,10 +350,10 @@ impl LightProtocol { let mut peer = peer.lock(); match peer.remote_flow { - Some((ref mut buf, ref flow)) => { - flow.recharge(buf); + Some((ref mut c, ref flow)) => { + flow.recharge(c); let max = flow.compute_cost(request.kind(), request.amount()); - buf.deduct_cost(max)?; + c.deduct_cost(max)?; } None => return Err(Error::NotServer), } @@ -454,7 +454,7 @@ impl LightProtocol { // - check whether request kinds match fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { let req_id = ReqId(raw.val_at(0)?); - let cur_buffer: U256 = raw.val_at(1)?; + let cur_credits: U256 = raw.val_at(1)?; trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); @@ -470,9 +470,9 @@ impl LightProtocol { (Some(request), Some(flow_info)) => { had_req = true; - let &mut (ref mut buf, ref mut flow) = flow_info; - let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit()); - buf.update_to(actual_buffer); + let &mut (ref mut c, ref mut flow) = flow_info; + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); if request.kind() != kind { Some(Error::UnsolicitedResponse) @@ -675,10 +675,10 @@ impl LightProtocol { return Err(Error::BadProtocolVersion); } - let remote_flow = flow_params.map(|params| (params.create_buffer(), params)); + let remote_flow = flow_params.map(|params| (params.create_credits(), params)); self.peers.write().insert(*peer, Mutex::new(Peer { - local_buffer: self.flow_params.create_buffer(), + local_credits: self.flow_params.create_credits(), status: status.clone(), capabilities: capabilities.clone(), remote_flow: remote_flow, @@ -783,10 +783,10 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::BLOCK_HEADERS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for header in response { stream.append_raw(&header.into_inner(), 1); @@ -845,11 +845,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::BLOCK_BODIES, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for body in response { match body { @@ -911,11 +911,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::RECEIPTS, { let mut stream = RlpStream::new_list(3); - 
stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for receipts in response { stream.append_raw(&receipts, 1); @@ -985,11 +985,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::PROOFS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for proof in response { stream.append_raw(&proof, 1); @@ -1057,11 +1057,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::CONTRACT_CODES, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for code in response { stream.append(&code); @@ -1130,11 +1130,11 @@ impl LightProtocol { let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len); assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost); + let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); io.respond(packet::HEADER_PROOFS, { let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_buffer).begin_list(response.len()); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); for proof in response { stream.append_raw(&proof, 1); diff --git a/ethcore/light/src/net/buffer_flow.rs b/ethcore/light/src/net/request_credits.rs similarity index 82% rename from ethcore/light/src/net/buffer_flow.rs rename to ethcore/light/src/net/request_credits.rs index cce54da59..4f5d79504 100644 --- a/ethcore/light/src/net/buffer_flow.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! LES buffer flow management. +//! Request credit management. //! -//! Every request in the LES protocol leads to a reduction -//! of the requester's buffer value as a rate-limiting mechanism. -//! This buffer value will recharge at a set rate. +//! Every request in the light protocol leads to a reduction +//! of the requester's amount of credits as a rate-limiting mechanism. +//! The amount of credits will recharge at a set rate. //! -//! This module provides an interface for configuration of buffer -//! flow costs and recharge rates. +//! This module provides an interface for configuration of +//! costs and recharge rates of request credits. //! //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. @@ -38,19 +38,19 @@ use time::{Duration, SteadyTime}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct Cost(pub U256, pub U256); -/// Buffer value. +/// Credits value. 
/// /// Produced and recharged using `FlowParams`. /// Definitive updates can be made as well -- these will reset the recharge /// point to the time of the update. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct Buffer { +pub struct Credits { estimate: U256, recharge_point: SteadyTime, } -impl Buffer { - /// Get the current buffer value. +impl Credits { + /// Get the current amount of credits.. pub fn current(&self) -> U256 { self.estimate.clone() } /// Make a definitive update. @@ -61,7 +61,7 @@ impl Buffer { self.recharge_point = SteadyTime::now(); } - /// Attempt to apply the given cost to the buffer. + /// Attempt to apply the given cost to the amount of credits. /// /// If successful, the cost will be deducted successfully. /// @@ -69,7 +69,7 @@ impl Buffer { /// error will be produced. pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> { match cost > self.estimate { - true => Err(Error::BufferEmpty), + true => Err(Error::NoCredits), false => { self.estimate = self.estimate - cost; Ok(()) @@ -165,7 +165,7 @@ impl RlpDecodable for CostTable { } } -/// A buffer-flow manager handles costs, recharge, limits +/// Handles costs, recharge, limits of request credits. #[derive(Debug, Clone, PartialEq)] pub struct FlowParams { costs: CostTable, @@ -175,7 +175,7 @@ pub struct FlowParams { impl FlowParams { /// Create new flow parameters from a request cost table, - /// buffer limit, and (minimum) rate of recharge. + /// credit limit, and (minimum) rate of recharge. pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self { FlowParams { costs: costs, @@ -201,7 +201,7 @@ impl FlowParams { } } - /// Get a reference to the buffer limit. + /// Get a reference to the credit limit. pub fn limit(&self) -> &U256 { &self.limit } /// Get a reference to the cost table. @@ -227,10 +227,10 @@ impl FlowParams { } /// Compute the maximum number of costs of a specific kind which can be made - /// with the given buffer. + /// with the given amount of credits /// Saturates at `usize::max()`. This is not a problem in practice because /// this amount of requests is already prohibitively large. - pub fn max_amount(&self, buffer: &Buffer, kind: request::Kind) -> usize { + pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize { use util::Uint; use std::usize; @@ -243,7 +243,7 @@ impl FlowParams { request::Kind::HeaderProofs => &self.costs.header_proofs, }; - let start = buffer.current(); + let start = credits.current(); if start <= cost.0 { return 0; @@ -259,36 +259,36 @@ impl FlowParams { } } - /// Create initial buffer parameter. - pub fn create_buffer(&self) -> Buffer { - Buffer { + /// Create initial credits.. + pub fn create_credits(&self) -> Credits { + Credits { estimate: self.limit, recharge_point: SteadyTime::now(), } } - /// Recharge the buffer based on time passed since last + /// Recharge the given credits based on time passed since last /// update. - pub fn recharge(&self, buf: &mut Buffer) { + pub fn recharge(&self, credits: &mut Credits) { let now = SteadyTime::now(); // recompute and update only in terms of full seconds elapsed // in order to keep the estimate as an underestimate. 
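		// Worked example with illustrative numbers: given limit = 100 and a recharge
		// rate of 20 credits per second, a peer sitting at 40 credits whose last
		// recharge point is 1.7s old counts only 1 full second, so the estimate becomes
		// min(100, 40 + 1 * 20) = 60 and the recharge point advances by exactly one
		// second; the remaining 0.7s is credited on a later call.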
- let elapsed = (now - buf.recharge_point).num_seconds(); - buf.recharge_point = buf.recharge_point + Duration::seconds(elapsed); + let elapsed = (now - credits.recharge_point).num_seconds(); + credits.recharge_point = credits.recharge_point + Duration::seconds(elapsed); let elapsed: U256 = elapsed.into(); - buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge)); + credits.estimate = ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge)); } - /// Refund some buffer which was previously deducted. + /// Refund some credits which were previously deducted. /// Does not update the recharge timestamp. - pub fn refund(&self, buf: &mut Buffer, refund_amount: U256) { - buf.estimate = buf.estimate + refund_amount; + pub fn refund(&self, credits: &mut Credits, refund_amount: U256) { + credits.estimate = credits.estimate + refund_amount; - if buf.estimate > self.limit { - buf.estimate = self.limit + if credits.estimate > self.limit { + credits.estimate = self.limit } } } @@ -318,20 +318,20 @@ mod tests { } #[test] - fn buffer_mechanism() { + fn credits_mechanism() { use std::thread; use std::time::Duration; let flow_params = FlowParams::new(100.into(), Default::default(), 20.into()); - let mut buffer = flow_params.create_buffer(); + let mut credits = flow_params.create_credits(); - assert!(buffer.deduct_cost(101.into()).is_err()); - assert!(buffer.deduct_cost(10.into()).is_ok()); + assert!(credits.deduct_cost(101.into()).is_err()); + assert!(credits.deduct_cost(10.into()).is_ok()); thread::sleep(Duration::from_secs(1)); - flow_params.recharge(&mut buffer); + flow_params.recharge(&mut credits); - assert_eq!(buffer.estimate, 100.into()); + assert_eq!(credits.estimate, 100.into()); } } diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 655dc404f..3e32f6609 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -19,7 +19,7 @@ use rlp::{DecoderError, RlpDecodable, RlpEncodable, RlpStream, Stream, UntrustedRlp, View}; use util::{H256, U256}; -use super::buffer_flow::FlowParams; +use super::request_credits::FlowParams; // recognized handshake/announcement keys. // unknown keys are to be skipped, known keys have a defined order. @@ -207,7 +207,7 @@ impl Capabilities { /// Attempt to parse a handshake message into its three parts: /// - chain status /// - serving capabilities -/// - buffer flow parameters +/// - request credit parameters pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, Option), DecoderError> { let mut parser = Parser { pos: 0, @@ -300,7 +300,7 @@ pub struct Announcement { pub serve_chain_since: Option, /// optional new transaction-relay capability. false means "no change" pub tx_relay: bool, - // TODO: changes in buffer flow? + // TODO: changes in request credits. } /// Parse an announcement. 
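The handshake parsed in this module optionally carries the serving peer's `FlowParams`, and the renamed `request_credits` module turns those parameters into a budget: every cost pairs a base charge with a per-item charge, and the current credit balance bounds how many items may be requested at once. A minimal sketch of that arithmetic, using plain u64 in place of U256 and illustrative names rather than the actual Parity types:

struct Cost {
    base: u64,
    per_item: u64,
}

// total charge for requesting `amount` items of one kind
fn compute_cost(cost: &Cost, amount: u64) -> u64 {
    cost.base + cost.per_item * amount
}

// largest request the current balance can pay for (assumes per_item > 0)
fn max_amount(cost: &Cost, credits: u64) -> u64 {
    if credits <= cost.base {
        0
    } else {
        (credits - cost.base) / cost.per_item
    }
}

This is the calculation that `max_requests` and `request_from` consult before building a request, and that a server recomputes before deducting from its own view of the requester.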
@@ -372,7 +372,7 @@ pub fn write_announcement(announcement: &Announcement) -> Vec { #[cfg(test)] mod tests { use super::*; - use super::super::buffer_flow::FlowParams; + use super::super::request_credits::FlowParams; use util::{U256, H256, FixedHash}; use rlp::{RlpStream, Stream ,UntrustedRlp, View}; diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 47d73aef2..4efa6f680 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -24,7 +24,7 @@ use ethcore::transaction::PendingTransaction; use ethcore::encoded; use network::{PeerId, NodeId}; -use net::buffer_flow::FlowParams; +use net::request_credits::FlowParams; use net::context::IoContext; use net::status::{Capabilities, Status, write_handshake}; use net::{encode_request, LightProtocol, Params, packet, Peer}; @@ -203,7 +203,7 @@ fn genesis_mismatch() { } #[test] -fn buffer_overflow() { +fn credit_overflow() { let flow_params = make_flow_params(); let capabilities = capabilities(); @@ -268,11 +268,11 @@ fn get_block_headers() { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(10); + response_stream.append(&req_id).append(&new_creds).begin_list(10); for header in headers { response_stream.append_raw(&header.into_inner(), 1); } @@ -317,11 +317,11 @@ fn get_block_bodies() { let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(bodies.len(), 10); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(10); + response_stream.append(&req_id).append(&new_creds).begin_list(10); for body in bodies { response_stream.append_raw(&body.into_inner(), 1); } @@ -371,11 +371,11 @@ fn get_block_receipts() { .map(|hash| provider.client.block_receipts(hash).unwrap()) .collect(); - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(receipts.len()); + response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len()); for block_receipts in receipts { response_stream.append_raw(&block_receipts, 1); } @@ -420,11 +420,11 @@ fn get_state_proofs() { vec![::util::sha3::SHA3_NULL_RLP.to_vec()], ]; - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(2); + response_stream.append(&req_id).append(&new_creds).begin_list(2); for proof in proofs { response_stream.begin_list(proof.len()); for node in proof { @@ -472,11 +472,11 @@ fn get_contract_code() { 
key2.iter().chain(key2.iter()).cloned().collect(), ]; - let new_buf = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_buf).begin_list(2); + response_stream.append(&req_id).append(&new_creds).begin_list(2); for code in codes { response_stream.append(&code); } @@ -515,10 +515,10 @@ fn id_guard() { pending_requests.insert(req_id_2, req, ::time::SteadyTime::now()); proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { - local_buffer: flow_params.create_buffer(), + local_credits: flow_params.create_credits(), status: status(provider.client.chain_info()), capabilities: capabilities.clone(), - remote_flow: Some((flow_params.create_buffer(), flow_params)), + remote_flow: Some((flow_params.create_credits(), flow_params)), sent_head: provider.client.chain_info().best_block_hash, last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index b73da48bb..d0e472374 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -27,7 +27,7 @@ use ethcore::spec::Spec; use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; -use light::net::buffer_flow::FlowParams; +use light::net::request_credits::FlowParams; use network::{NodeId, PeerId}; use util::RwLock; From ee7779df171e69fe2eb5f022b9f8e119cb55aa84 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 24 Feb 2017 20:16:32 +0100 Subject: [PATCH 05/89] proving state backend --- ethcore/src/state/backend.rs | 94 +++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 81a770fe7..dfb4465fa 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -21,10 +21,12 @@ //! should become general over time to the point where not even a //! merkle trie is strictly necessary. +use std::collections::{HashSet, HashMap}; use std::sync::Arc; use state::Account; -use util::{Address, AsHashDB, HashDB, H256}; +use util::{Address, MemoryDB, Mutex, H256}; +use util::hashdb::{AsHashDB, HashDB, DBValue}; /// State backend. See module docs for more details. pub trait Backend: Send { @@ -91,3 +93,93 @@ impl Backend for NoCache { fn note_non_null_account(&self, _address: &Address) {} fn is_known_null(&self, _address: &Address) -> bool { false } } + +/// Proving state backend. +/// See module docs for more details. +/// +/// This doesn't cache anything or rely on the canonical state caches. +#[derive(Debug, Clone, PartialEq)] +pub struct Proving { + base: H, // state we're proving values from. + changed: MemoryDB, // changed state via insertions. 
+ proof: Mutex>, +} + +impl HashDB for Proving { + fn keys(&self) -> HashMap { + self.base.as_hashdb().keys() + .extend(self.changed.keys()) + } + + fn get(&self, key: &H256) -> Option { + match self.base.as_hashdb().get(key) { + Some(val) => { + self.proof.lock().insert(val.clone()); + Some(val) + } + None => self.changed.get(key) + } + } + + fn contains(&self, key: &H256) -> bool { + self.get(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.changed.insert(value) + } + + fn emplace(&mut self, key: H256, value: DBValue) { + self.changed.emplace(key, value) + } + + fn remove(&mut self, key: &H256) { + // only remove from `changed` + if self.changed.contains(key) { + self.changed.remove(key) + } + } +} + +impl Backend for Proving { + fn as_hashdb(&self) -> &HashDB { + self + } + + fn as_hashdb_mut(&mut self) -> &mut HashDB { + self + } + + fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } + + fn cache_code(&self, _: H256, _: Arc>) { } + + fn get_cached_account(&self, _: &Address) -> Option> { None } + + fn get_cached(&self, _: &Address, _: F) -> Option + where F: FnOnce(Option<&mut Account>) -> U + { + None + } + + fn get_cached_code(&self, _: &H256) -> Option>> { None } + fn note_non_null_account(&self, _: &Address) { } + fn is_known_null(&self, _: &Address) -> bool { false } +} + +impl Proving { + /// Create a new `Proving` over a base database. + /// This will store all values ever fetched from that base. + pub fn new(base: H) -> Self { + Proving { + base: base, + changed: MemoryDB::new(), + proof: Mutex::new(HashSet::new()), + } + } + + /// Consume the backend, extracting the gathered proof. + pub fn extract_proof(self) -> Vec { + self.proof.into_inner().into_iter().collect() + } +} From 92e5982127548aed46b92354bde2c4916e0352a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 00:27:48 +0100 Subject: [PATCH 06/89] generate transaction proofs from provider --- ethcore/light/src/client/mod.rs | 6 +- ethcore/light/src/net/mod.rs | 29 +++++++- ethcore/light/src/net/request_credits.rs | 10 ++- ethcore/light/src/net/request_set.rs | 1 + ethcore/light/src/provider.rs | 30 +++++++- ethcore/light/src/types/les_request.rs | 33 ++++++++- ethcore/src/client/client.rs | 92 ++++++++++++------------ ethcore/src/client/test_client.rs | 4 ++ ethcore/src/client/traits.rs | 4 ++ ethcore/src/state/backend.rs | 18 +++-- ethcore/src/state/mod.rs | 13 ++++ util/src/hashdb.rs | 10 +++ 12 files changed, 195 insertions(+), 55 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index a113b4367..9626f9f6c 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{Bytes, H256, Mutex, RwLock}; +use util::{Bytes, DBValue, H256, Mutex, RwLock}; use self::header_chain::HeaderChain; @@ -293,6 +293,10 @@ impl ::provider::Provider for Client { None } + fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> { Vec::new() } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 2ffaffa64..ad1eaac00 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -19,7 +19,7 @@ //! This uses a "Provider" to answer requests. //! 
See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) -use ethcore::transaction::UnverifiedTransaction; +use ethcore::transaction::{Action, UnverifiedTransaction}; use ethcore::receipt::Receipt; use io::TimerToken; @@ -73,7 +73,7 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; pub const MAX_PROTOCOL_VERSION: u8 = 1; /// Packet count for LES. -pub const PACKET_COUNT: u8 = 15; +pub const PACKET_COUNT: u8 = 17; // packet ID definitions. mod packet { @@ -109,6 +109,10 @@ mod packet { // request and response for header proofs in a CHT. pub const GET_HEADER_PROOFS: u8 = 0x0d; pub const HEADER_PROOFS: u8 = 0x0e; + + // request and response for transaction proof. + pub const GET_TRANSACTION_PROOF: u8 = 0x0f; + pub const TRANSACTION_PROOF: u8 = 0x10; } // timeouts for different kinds of requests. all values are in milliseconds. @@ -121,6 +125,7 @@ mod timeout { pub const PROOFS: i64 = 4000; pub const CONTRACT_CODES: i64 = 5000; pub const HEADER_PROOFS: i64 = 3500; + pub const TRANSACTION_PROOF: i64 = 5000; } /// A request id. @@ -370,6 +375,7 @@ impl LightProtocol { request::Kind::StateProofs => packet::GET_PROOFS, request::Kind::Codes => packet::GET_CONTRACT_CODES, request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, + request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF, }; io.send(*peer_id, packet_id, packet_data); @@ -1320,6 +1326,25 @@ fn encode_request(req: &Request, req_id: usize) -> Vec { .append(&proof_req.from_level); } + stream.out() + } + Request::TransactionProof(ref request) => { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).begin_list(7) + .append(&request.at) + .append(&request.from); + + match request.action { + Action::Create => stream.append_empty_data(), + Action::Call(ref to) => stream.append(to), + }; + + stream + .append(&request.gas) + .append(&request.gas_price) + .append(&request.value) + .append(&request.data); + stream.out() } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 4f5d79504..3a1cb9996 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -81,12 +81,13 @@ impl Credits { /// A cost table, mapping requests to base and per-request costs. #[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - headers: Cost, + headers: Cost, // cost per header bodies: Cost, receipts: Cost, state_proofs: Cost, contract_codes: Cost, header_proofs: Cost, + transaction_proof: Cost, // cost per gas. 
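	// Note that, unlike the entries above, which are priced per returned item, this
	// cost is applied per unit of gas requested, so with the defaults below a proof
	// request is charged 100_000 + 2 * gas credits.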
} impl Default for CostTable { @@ -99,6 +100,7 @@ impl Default for CostTable { state_proofs: Cost(250000.into(), 25000.into()), contract_codes: Cost(200000.into(), 20000.into()), header_proofs: Cost(150000.into(), 15000.into()), + transaction_proof: Cost(100000.into(), 2.into()), } } } @@ -133,6 +135,7 @@ impl RlpDecodable for CostTable { let mut state_proofs = None; let mut contract_codes = None; let mut header_proofs = None; + let mut transaction_proof = None; for row in rlp.iter() { let msg_id: u8 = row.val_at(0)?; @@ -150,6 +153,7 @@ impl RlpDecodable for CostTable { packet::GET_PROOFS => state_proofs = Some(cost), packet::GET_CONTRACT_CODES => contract_codes = Some(cost), packet::GET_HEADER_PROOFS => header_proofs = Some(cost), + packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost), _ => return Err(DecoderError::Custom("Unrecognized message in cost table")), } } @@ -161,6 +165,7 @@ impl RlpDecodable for CostTable { state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?, contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?, header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?, + transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?, }) } } @@ -197,6 +202,7 @@ impl FlowParams { state_proofs: free_cost.clone(), contract_codes: free_cost.clone(), header_proofs: free_cost.clone(), + transaction_proof: free_cost, } } } @@ -220,6 +226,7 @@ impl FlowParams { request::Kind::StateProofs => &self.costs.state_proofs, request::Kind::Codes => &self.costs.contract_codes, request::Kind::HeaderProofs => &self.costs.header_proofs, + request::Kind::TransactionProof => &self.costs.transaction_proof, }; let amount: U256 = amount.into(); @@ -241,6 +248,7 @@ impl FlowParams { request::Kind::StateProofs => &self.costs.state_proofs, request::Kind::Codes => &self.costs.contract_codes, request::Kind::HeaderProofs => &self.costs.header_proofs, + request::Kind::TransactionProof => &self.costs.transaction_proof, }; let start = credits.current(); diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index c9f278776..57eb232cc 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -101,6 +101,7 @@ impl RequestSet { request::Kind::StateProofs => timeout::PROOFS, request::Kind::Codes => timeout::CONTRACT_CODES, request::Kind::HeaderProofs => timeout::HEADER_PROOFS, + request::Kind::TransactionProof => timeout::TRANSACTION_PROOF, }; base + Duration::milliseconds(kind_timeout) <= now diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index caade3857..2ef7f1f04 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,7 +24,7 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{Bytes, RwLock, H256}; +use util::{Bytes, DBValue, RwLock, H256}; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; @@ -193,6 +193,10 @@ pub trait Provider: Send + Sync { /// Provide pending transactions. fn ready_transactions(&self) -> Vec; + + /// Provide a proof-of-execution for the given transaction proof request. + /// Returns a vector of all state items necessary to execute the transaction. 
+ fn transaction_proof(&self, req: request::TransactionProof) -> Option>; } // Implementation of a light client data provider for a client. @@ -283,6 +287,26 @@ impl Provider for T { } } + fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + use ethcore::transaction::Transaction; + + let id = BlockId::Hash(req.at); + let nonce = match self.nonce(&req.from, id.clone()) { + Some(nonce) => nonce, + None => return None, + }; + let transaction = Transaction { + nonce: nonce, + gas: req.gas, + gas_price: req.gas_price, + action: req.action, + value: req.value, + data: req.data, + }.fake_sign(req.from); + + self.prove_transaction(transaction, id) + } + fn ready_transactions(&self) -> Vec { BlockChainClient::ready_transactions(self) } @@ -343,6 +367,10 @@ impl Provider for LightProvider { None } + fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec { let chain_info = self.chain_info(); self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp) diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs index b4940980e..dbff19eb5 100644 --- a/ethcore/light/src/types/les_request.rs +++ b/ethcore/light/src/types/les_request.rs @@ -16,7 +16,8 @@ //! LES request types. -use util::H256; +use ethcore::transaction::Action; +use util::{Address, H256, U256, Uint}; /// Either a hash or a number. #[derive(Debug, Clone, PartialEq, Eq)] @@ -134,6 +135,26 @@ pub struct HeaderProofs { pub requests: Vec, } +/// A request for proof of (simulated) transaction execution. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", binary)] +pub struct TransactionProof { + /// Block hash to request for. + pub at: H256, + /// Address to treat as the caller. + pub from: Address, + /// Action to take: either a call or a create. + pub action: Action, + /// Amount of gas to request proof-of-execution for. + pub gas: U256, + /// Price for each gas. + pub gas_price: U256, + /// Value to simulate sending. + pub value: U256, + /// Transaction data. + pub data: Vec, +} + /// Kinds of requests. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "ipc", binary)] @@ -150,6 +171,8 @@ pub enum Kind { Codes, /// Requesting header proofs (from the CHT). HeaderProofs, + /// Requesting proof of transaction execution. + TransactionProof, } /// Encompasses all possible types of requests in a single structure. @@ -168,6 +191,8 @@ pub enum Request { Codes(ContractCodes), /// Requesting header proofs. HeaderProofs(HeaderProofs), + /// Requesting proof of transaction execution. + TransactionProof(TransactionProof), } impl Request { @@ -180,10 +205,12 @@ impl Request { Request::StateProofs(_) => Kind::StateProofs, Request::Codes(_) => Kind::Codes, Request::HeaderProofs(_) => Kind::HeaderProofs, + Request::TransactionProof(_) => Kind::TransactionProof, } } /// Get the amount of requests being made. + /// In the case of `TransactionProof`, this is the amount of gas being requested. 
pub fn amount(&self) -> usize { match *self { Request::Headers(ref req) => req.max, @@ -192,6 +219,10 @@ impl Request { Request::StateProofs(ref req) => req.requests.len(), Request::Codes(ref req) => req.code_requests.len(), Request::HeaderProofs(ref req) => req.requests.len(), + Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() { + true => usize::max_value(), + false => req.gas.low_u64() as usize, + } } } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 7f209bad1..d8849ded3 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -24,7 +24,7 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable}; -use util::{journaldb, TrieFactory, Trie}; +use util::{journaldb, DBValue, TrieFactory, Trie}; use util::{U256, H256, Address, H2048, Uint, FixedHash}; use util::trie::TrieSpec; use util::kvdb::*; @@ -34,7 +34,7 @@ use io::*; use views::BlockView; use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use header::BlockNumber; -use state::{State, CleanupMode}; +use state::{self, State, CleanupMode}; use spec::Spec; use basic_types::Seal; use engines::Engine; @@ -309,17 +309,23 @@ impl Client { /// The env info as of the best block. fn latest_env_info(&self) -> EnvInfo { - let header = self.best_block_header(); + self.env_info(BlockId::Latest).expect("Best block header always stored; qed") + } - EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(header.hash()), - gas_used: U256::default(), - gas_limit: header.gas_limit(), - } + /// The env info as of a given block. + /// returns `None` if the block unknown. + fn env_info(&self, id: BlockId) -> Option { + self.block_header(id).map(|header| { + EnvInfo { + number: header.number(), + author: header.author(), + timestamp: header.timestamp(), + difficulty: header.difficulty(), + last_hashes: self.build_last_hashes(header.parent_hash()), + gas_used: U256::default(), + gas_limit: header.gas_limit(), + } + }) } fn build_last_hashes(&self, parent_hash: H256) -> Arc { @@ -874,17 +880,8 @@ impl snapshot::DatabaseRestore for Client { impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { - let header = self.block_header(block).ok_or(CallError::StatePruned)?; - let last_hashes = self.build_last_hashes(header.parent_hash()); - let env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: U256::max_value(), - }; + let env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + // that's just a copy of the state. 
let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; @@ -910,17 +907,13 @@ impl BlockChainClient for Client { fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result { const UPPER_CEILING: u64 = 1_000_000_000_000u64; - let header = self.block_header(block).ok_or(CallError::StatePruned)?; - let last_hashes = self.build_last_hashes(header.parent_hash()); - let env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: UPPER_CEILING.into(), + let (mut upper, env_info) = { + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + let initial_upper = env_info.gas_limit; + env_info.gas_limit = UPPER_CEILING.into(); + (initial_upper, env_info) }; + // that's just a copy of the state. let original_state = self.state_at(block).ok_or(CallError::StatePruned)?; let sender = t.sender(); @@ -946,7 +939,6 @@ impl BlockChainClient for Client { .unwrap_or(false)) }; - let mut upper = header.gas_limit(); if !cond(upper)? { // impossible at block gas limit - try `UPPER_CEILING` instead. // TODO: consider raising limit by powers of two. @@ -989,7 +981,7 @@ impl BlockChainClient for Client { fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result { let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?; - let header = self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; + let mut env_info = self.env_info(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?; let mut txs = body.transactions(); @@ -999,16 +991,6 @@ impl BlockChainClient for Client { } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let last_hashes = self.build_last_hashes(header.hash()); - let mut env_info = EnvInfo { - number: header.number(), - author: header.author(), - timestamp: header.timestamp(), - difficulty: header.difficulty(), - last_hashes: last_hashes, - gas_used: U256::default(), - gas_limit: header.gas_limit(), - }; const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed"; let rest = txs.split_off(address.index); for t in txs { @@ -1620,6 +1602,26 @@ impl ::client::ProvingBlockChainClient for Client { .and_then(|x| x) .unwrap_or_else(Vec::new) } + + fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { + let (state, env_info) = match (self.state_at(id), self.env_info(id)) { + (Some(s), Some(e)) => (s, e), + _ => return None, + }; + let mut jdb = self.state_db.lock().journal_db().boxed_clone(); + let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); + + let mut state = state.replace_backend(backend); + let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false }; + let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options); + + + match res { + Err(ExecutionError::Internal(_)) => return None, + _ => return Some(state.drop().1.extract_proof()), + } + } + } impl Drop for Client { diff --git 
a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index dc9cb5944..5d436f4c5 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -765,6 +765,10 @@ impl ProvingBlockChainClient for TestBlockChainClient { fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { Vec::new() } + + fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option> { + None + } } impl EngineClient for TestBlockChainClient { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index dce708b3a..13abb33f9 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -16,6 +16,7 @@ use std::collections::BTreeMap; use util::{U256, Address, H256, H2048, Bytes, Itertools}; +use util::hashdb::DBValue; use util::stats::Histogram; use blockchain::TreeRoute; use verification::queue::QueueInfo as BlockQueueInfo; @@ -336,4 +337,7 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// Get code by address hash. fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; + + /// Prove execution of a transaction at the given block. + fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option>; } diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index dfb4465fa..041eb71b4 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -98,7 +98,6 @@ impl Backend for NoCache { /// See module docs for more details. /// /// This doesn't cache anything or rely on the canonical state caches. -#[derive(Debug, Clone, PartialEq)] pub struct Proving { base: H, // state we're proving values from. changed: MemoryDB, // changed state via insertions. @@ -107,8 +106,9 @@ pub struct Proving { impl HashDB for Proving { fn keys(&self) -> HashMap { - self.base.as_hashdb().keys() - .extend(self.changed.keys()) + let mut keys = self.base.as_hashdb().keys(); + keys.extend(self.changed.keys()); + keys } fn get(&self, key: &H256) -> Option { @@ -141,7 +141,7 @@ impl HashDB for Proving { } } -impl Backend for Proving { +impl Backend for Proving { fn as_hashdb(&self) -> &HashDB { self } @@ -183,3 +183,13 @@ impl Proving { self.proof.into_inner().into_iter().collect() } } + +impl Clone for Proving { + fn clone(&self) -> Self { + Proving { + base: self.base.clone(), + changed: self.changed.clone(), + proof: Mutex::new(self.proof.lock().clone()), + } + } +} diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index e25d7d404..ce711ffbd 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -264,6 +264,19 @@ impl State { Ok(state) } + /// Swap the current backend for another. + // TODO: [rob] find a less hacky way to avoid duplication of `Client::state_at`. + pub fn replace_backend(self, backend: T) -> State { + State { + db: backend, + root: self.root, + cache: self.cache, + checkpoints: self.checkpoints, + account_start_nonce: self.account_start_nonce, + factories: self.factories, + } + } + /// Create a recoverable checkpoint of this state. 
pub fn checkpoint(&mut self) { self.checkpoints.get_mut().push(HashMap::new()); diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 3b1939cae..8217413ef 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -125,3 +125,13 @@ impl AsHashDB for T { self } } + +impl<'a> AsHashDB for &'a mut HashDB { + fn as_hashdb(&self) -> &HashDB { + &**self + } + + fn as_hashdb_mut(&mut self) -> &mut HashDB { + &mut **self + } +} From 4158693470035f9a6577b65f428b13a6ae506f3e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 11:07:38 +0100 Subject: [PATCH 07/89] network messages for transaction proof --- ethcore/light/src/net/mod.rs | 88 +++++++++++++++++++++++- ethcore/light/src/net/request_credits.rs | 3 +- ethcore/light/src/net/tests/mod.rs | 6 +- ethcore/light/src/provider.rs | 2 +- 4 files changed, 95 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index ad1eaac00..6bb1cb227 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -26,7 +26,7 @@ use io::TimerToken; use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, Stream, UntrustedRlp, View}; use util::hash::H256; -use util::{Bytes, Mutex, RwLock, U256}; +use util::{Bytes, DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; use std::collections::HashMap; @@ -211,6 +211,8 @@ pub trait Handler: Send + Sync { /// Called when a peer responds with header proofs. Each proof should be a block header coupled /// with a series of trie nodes is ascending order by distance from the root. fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. + fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. fn tick(&self, _ctx: &BasicContext) { } /// Called on abort. This signals to handlers that they should clean up @@ -535,6 +537,9 @@ impl LightProtocol { packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), + packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp), + packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp), + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), other => { @@ -1178,6 +1183,87 @@ impl LightProtocol { Ok(()) } + // Receive a request for proof-of-execution. + fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + const MAX_GAS: usize = 10_000_000; // refuse to execute more than this amount of gas at once. + use util::Uint; + + let peers = self.peers.read(); + let peer = match peers.get(peer) { + Some(peer) => peer, + None => { + debug!(target: "les", "Ignoring request from unknown peer"); + return Ok(()) + } + }; + let mut peer = peer.lock(); + + let req_id: u64 = raw.val_at(0)?; + + let req = { + let req_rlp = raw.at(1)?; + request::TransactionProof { + at: req_rlp.val_at(0)?, + from: req_rlp.val_at(1)?, + action: if req_rlp.at(2)?.is_empty() { + Action::Create + } else { + Action::Call(req_rlp.val_at(2)?) + }, + gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()), + gas_price: req_rlp.val_at(4)?, + value: req_rlp.val_at(5)?, + data: req_rlp.val_at(6)?, + } + }; + + // always charge the peer for all the gas. 
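	// With the default cost table (base 100_000, 2 credits per gas) a request for
	// 1_000_000 gas is charged 100_000 + 2 * 1_000_000 = 2_100_000 credits before
	// execution even starts, and unlike the header/body/receipt handlers this handler
	// issues no refund of the difference against the actual cost afterwards.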
+ peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?; + + let response = match self.provider.transaction_proof(req) { + Some(res) => res, + None => vec![], + }; + + let cur_credits = peer.local_credits.current(); + + io.respond(packet::TRANSACTION_PROOF, { + let mut stream = RlpStream::new_list(3); + stream.append(&req_id).append(&cur_credits).begin_list(response.len()); + + for state_item in response { + stream.append(&&state_item[..]); + } + + stream.out() + }); + + Ok(()) + } + + // Receive a response for proof-of-execution. + fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; + let raw_proof: Vec = raw.at(2)?.iter() + .map(|rlp| { + let mut db_val = DBValue::new(); + db_val.append_slice(rlp.data()?); + Ok(db_val) + }) + .collect::, ::rlp::DecoderError>>()?; + + let req_id = id_guard.defuse(); + for handler in &self.handlers { + handler.on_transaction_proof(&Ctx { + peer: *peer, + io: io, + proto: self, + }, req_id, &raw_proof); + } + + Ok(()) + } + // Receive a set of transactions to relay. fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { const MAX_TRANSACTIONS: usize = 256; diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 3a1cb9996..97aa9b431 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -114,7 +114,7 @@ impl RlpEncodable for CostTable { .append(&cost.1); } - s.begin_list(6); + s.begin_list(7); append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); @@ -122,6 +122,7 @@ impl RlpEncodable for CostTable { append_cost(s, packet::GET_PROOFS, &self.state_proofs); append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); + append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof); } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 4efa6f680..8faba0b00 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -32,7 +32,7 @@ use provider::Provider; use request::{self, Request, Headers}; use rlp::*; -use util::{Bytes, H256, U256}; +use util::{Bytes, DBValue, H256, U256}; use std::sync::Arc; @@ -127,6 +127,10 @@ impl Provider for TestProvider { None } + fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + None + } + fn ready_transactions(&self) -> Vec { self.0.client.ready_transactions() } diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 2ef7f1f04..3f55a6b99 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -367,7 +367,7 @@ impl Provider for LightProvider { None } - fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { None } From 32f906fe9fc4b3e5b5ef9af4e345525128153829 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 11:54:32 +0100 Subject: [PATCH 08/89] transaction proof test --- ethcore/src/client/client.rs | 4 +-- ethcore/src/state/backend.rs | 54 +++++++++++++++++++++++++++--------- ethcore/src/tests/client.rs | 43 +++++++++++++++++++++++++++- 3 files changed, 85 insertions(+), 16 deletions(-) diff --git 
a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index d8849ded3..ad61fd629 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -308,13 +308,13 @@ impl Client { } /// The env info as of the best block. - fn latest_env_info(&self) -> EnvInfo { + pub fn latest_env_info(&self) -> EnvInfo { self.env_info(BlockId::Latest).expect("Best block header always stored; qed") } /// The env info as of a given block. /// returns `None` if the block unknown. - fn env_info(&self, id: BlockId) -> Option { + pub fn env_info(&self, id: BlockId) -> Option { self.block_header(id).map(|header| { EnvInfo { number: header.number(), diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 041eb71b4..5ab620b0e 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -66,21 +66,48 @@ pub trait Backend: Send { fn is_known_null(&self, address: &Address) -> bool; } -/// A raw backend which simply wraps a hashdb and does no caching. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct NoCache(T); +/// A raw backend used to check proofs of execution. +/// +/// This doesn't delete anything since execution proofs won't have mangled keys +/// and we want to avoid collisions. +// TODO: when account lookup moved into backends, this won't rely as tenuously on intended +// usage. +#[derive(Clone, PartialEq)] +pub struct ProofCheck(MemoryDB); -impl NoCache { - /// Create a new `NoCache` backend. - pub fn new(inner: T) -> Self { NoCache(inner) } - - /// Consume the backend, yielding the inner database. - pub fn into_inner(self) -> T { self.0 } +impl ProofCheck { + /// Create a new `ProofCheck` backend from the given state items. + pub fn new(proof: &[DBValue]) -> Self { + let mut db = MemoryDB::new(); + for item in proof { db.insert(item); } + ProofCheck(db) + } } -impl Backend for NoCache { - fn as_hashdb(&self) -> &HashDB { self.0.as_hashdb() } - fn as_hashdb_mut(&mut self) -> &mut HashDB { self.0.as_hashdb_mut() } +impl HashDB for ProofCheck { + fn keys(&self) -> HashMap { self.0.keys() } + fn get(&self, key: &H256) -> Option { + self.0.get(key) + } + + fn contains(&self, key: &H256) -> bool { + self.0.contains(key) + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.0.insert(value) + } + + fn emplace(&mut self, key: H256, value: DBValue) { + self.0.emplace(key, value) + } + + fn remove(&mut self, _key: &H256) { } +} + +impl Backend for ProofCheck { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} fn cache_code(&self, _hash: H256, _code: Arc>) {} fn get_cached_account(&self, _addr: &Address) -> Option> { None } @@ -95,7 +122,8 @@ impl Backend for NoCache { } /// Proving state backend. -/// See module docs for more details. +/// This keeps track of all state values loaded during usage of this backend. +/// The proof-of-execution can be extracted with `extract_proof`. /// /// This doesn't cache anything or rely on the canonical state caches. 
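For orientation, the two backends in this file mirror one another: `ProofCheck` replays execution against nothing but the supplied state items, while `Proving`, described in the doc comment just above, records every value it reads so that those items can be produced in the first place. A minimal sketch of the record-and-replay idea, using a toy map instead of the real hash database (names and types here are illustrative only):

use std::collections::{HashMap, HashSet};

// Toy key-value store standing in for the HashDB/MemoryDB pair.
struct Recording<'a> {
    base: &'a HashMap<u64, Vec<u8>>, // canonical state being proven against
    proof: HashSet<Vec<u8>>,         // every value handed out is remembered here
}

impl<'a> Recording<'a> {
    fn get(&mut self, key: &u64) -> Option<Vec<u8>> {
        let value = self.base.get(key).cloned();
        if let Some(ref v) = value {
            // record the read so it can be shipped as part of the proof
            self.proof.insert(v.clone());
        }
        value
    }
}

struct Replaying {
    proof: HashMap<u64, Vec<u8>>, // only what the prover recorded
}

impl Replaying {
    fn get(&self, key: &u64) -> Option<&Vec<u8>> {
        // values the prover never touched are simply absent, so an incomplete
        // proof makes re-execution fail rather than yield a different result
        self.proof.get(key)
    }
}

In the actual code the checking side rebuilds a `MemoryDB`, which keys entries by the hash of their contents, so the proof only needs to ship the raw values and the keys are recovered by hashing.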
pub struct Proving { diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 809604b13..ded70b363 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -16,7 +16,8 @@ use io::IoChannel; use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId}; -use state::CleanupMode; +use state::{self, State, CleanupMode}; +use executive::Executive; use ethereum; use block::IsBlock; use tests::helpers::*; @@ -342,3 +343,43 @@ fn does_not_propagate_delayed_transactions() { assert_eq!(2, client.ready_transactions().len()); assert_eq!(2, client.miner().pending_transactions().len()); } + +#[test] +fn transaction_proof() { + use ::client::ProvingBlockChainClient; + + let client_result = generate_dummy_client(0); + let client = client_result.reference(); + let address = Address::random(); + let test_spec = Spec::new_test(); + for _ in 0..20 { + let mut b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]); + b.block_mut().fields_mut().state.add_balance(&address, &5.into(), CleanupMode::NoEmpty).unwrap(); + b.block_mut().fields_mut().state.commit().unwrap(); + let b = b.close_and_lock().seal(&*test_spec.engine, vec![]).unwrap(); + client.import_sealed_block(b).unwrap(); // account change is in the journal overlay + } + + let transaction = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 21000.into(), + action: Action::Call(Address::default()), + value: 5.into(), + data: Vec::new(), + }.fake_sign(address); + + let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap(); + let backend = state::backend::ProofCheck::new(&proof); + + let mut factories = ::factory::Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys. 
+ let root = client.best_block_header().state_root(); + + let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap(); + Executive::new(&mut state, &client.latest_env_info(), &*test_spec.engine, &factories.vm) + .transact(&transaction, Default::default()).unwrap(); + + assert_eq!(state.balance(&Address::default()).unwrap(), 5.into()); + assert_eq!(state.balance(&address).unwrap(), 95.into()); +} From 2b671b847655c183cabe0441ecf83e87ee2c066d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 12:43:43 +0100 Subject: [PATCH 09/89] test for transaction proof message --- ethcore/light/src/net/tests/mod.rs | 54 ++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 8faba0b00..6a9de1467 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -20,7 +20,7 @@ use ethcore::blockchain_info::BlockChainInfo; use ethcore::client::{EachBlockWith, TestBlockChainClient}; use ethcore::ids::BlockId; -use ethcore::transaction::PendingTransaction; +use ethcore::transaction::{Action, PendingTransaction}; use ethcore::encoded; use network::{PeerId, NodeId}; @@ -32,7 +32,7 @@ use provider::Provider; use request::{self, Request, Headers}; use rlp::*; -use util::{Bytes, DBValue, H256, U256}; +use util::{Address, Bytes, DBValue, H256, U256}; use std::sync::Arc; @@ -492,6 +492,56 @@ fn get_contract_code() { proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body); } +#[test] +fn proof_of_execution() { + let flow_params = FlowParams::new(5_000_000.into(), Default::default(), 0.into()); + let capabilities = capabilities(); + + let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + + let cur_status = status(provider.client.chain_info()); + + { + let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); + proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone())); + proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body); + } + + let req_id = 112; + let mut request = Request::TransactionProof (request::TransactionProof { + at: H256::default(), + from: Address::default(), + action: Action::Call(Address::default()), + gas: 100.into(), + gas_price: 0.into(), + value: 0.into(), + data: Vec::new(), + }); + + // first: a valid amount to request execution of. + let request_body = encode_request(&request, req_id); + let response = { + let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100); + + let mut response_stream = RlpStream::new_list(3); + response_stream.append(&req_id).append(&new_creds).begin_list(0); + + response_stream.out() + }; + + let expected = Expect::Respond(packet::TRANSACTION_PROOF, response); + proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + + // next: way too much requested gas. 
+ if let Request::TransactionProof(ref mut req) = request { + req.gas = 100_000_000.into(); + } + let req_id = 113; + let request_body = encode_request(&request, req_id); + let expected = Expect::Punish(1); + proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); +} + #[test] fn id_guard() { use super::request_set::RequestSet; From 7c541117b32a71ed60a2fb6af6d2913065eca0ee Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 19:01:41 +0100 Subject: [PATCH 10/89] fix call bug --- ethcore/src/client/client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index ad61fd629..21936e1c5 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -880,7 +880,8 @@ impl snapshot::DatabaseRestore for Client { impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result { - let env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?; + env_info.gas_limit = U256::max_value(); // that's just a copy of the state. let mut state = self.state_at(block).ok_or(CallError::StatePruned)?; @@ -963,13 +964,13 @@ impl BlockChainClient for Client { { while upper - lower > 1.into() { let mid = (lower + upper) / 2.into(); - trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper); + trace!(target: "binary_chop", "{} .. {} .. {}", lower, mid, upper); let c = cond(mid)?; match c { true => upper = mid, false => lower = mid, }; - trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper); + trace!(target: "binary_chop", "{} => {} .. {}", c, lower, upper); } Ok(upper) } From 69e82e15a35a906ec71b5d94e906c32dd930fa85 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 25 Feb 2017 20:10:38 +0100 Subject: [PATCH 11/89] request transaction proofs from on_demand --- ethcore/light/src/net/mod.rs | 5 +- ethcore/light/src/on_demand/mod.rs | 81 +++++++++++++++++++++++++- ethcore/light/src/on_demand/request.rs | 35 ++++++++++- ethcore/src/client/client.rs | 1 - ethcore/src/env_info.rs | 4 +- ethcore/src/lib.rs | 3 +- ethcore/src/state/mod.rs | 62 ++++++++++++++++++-- 7 files changed, 178 insertions(+), 13 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 6bb1cb227..58ab9662e 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -1185,7 +1185,10 @@ impl LightProtocol { // Receive a request for proof-of-execution. fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - const MAX_GAS: usize = 10_000_000; // refuse to execute more than this amount of gas at once. + // refuse to execute more than this amount of gas at once. + // this is appx. the point at which the proof of execution would no longer fit in + // a single Devp2p packet. 
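// Review note on the figure above: gas bounds how many distinct accounts and storage
// slots an execution can touch, and every such read pulls a handful of trie nodes into
// the proof, so proof size grows roughly in proportion to gas used. 50M is an
// engineering estimate of where that stops fitting in a single packet.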
+ const MAX_GAS: usize = 50_000_000; use util::Uint; let peers = self.peers.read(); diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index c34e2d922..1efff4005 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -23,12 +23,14 @@ use std::collections::HashMap; use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::receipt::Receipt; +use ethcore::state::ProvedExecution; +use ethcore::executed::{Executed, ExecutionError}; use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, RwLock, U256}; +use util::{Bytes, DBValue, RwLock, U256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; @@ -50,6 +52,7 @@ enum Pending { BlockReceipts(request::BlockReceipts, Sender>), Account(request::Account, Sender), Code(request::Code, Sender), + TxProof(request::TransactionProof, Sender>), } /// On demand request service. See module docs for more details. @@ -347,6 +350,50 @@ impl OnDemand { self.orphaned_requests.write().push(pending) } + /// Request proof-of-execution for a transaction. + pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { + let (sender, receiver) = oneshot::channel(); + + self.dispatch_transaction_proof(ctx, req, sender); + + receiver + } + + fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender>) { + let num = req.header.number(); + let les_req = LesRequest::TransactionProof(les_request::TransactionProof { + at: req.header.hash(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }); + let pending = Pending::TxProof(req, sender); + + // we're looking for a peer with serveStateSince(num) + for (id, peer) in self.peers.read().iter() { + if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { + match ctx.request_from(*id, les_req.clone()) { + Ok(req_id) => { + trace!(target: "on_demand", "Assigning request to peer {}", id); + self.pending_requests.write().insert( + req_id, + pending + ); + return + } + Err(e) => + trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), + } + } + } + + trace!(target: "on_demand", "No suitable peer for request"); + self.orphaned_requests.write().push(pending) + } + // dispatch orphaned requests, and discard those for which the corresponding // receiver has been dropped. 
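// Review note (usage sketch, not part of this commit): callers drive the receiver
// returned by `transaction_proof` as a future, e.g. from the RPC layer later in this
// series:
//
//     sync.with_context(move |ctx| on_demand.transaction_proof(ctx, request))
//
// If no connected peer advertises `serveStateSince` covering the block in question, the
// request is parked in `orphaned_requests` and retried by the function below on each
// tick, unless the caller has already dropped its end of the channel.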
fn dispatch_orphaned(&self, ctx: &BasicContext) { @@ -390,6 +437,8 @@ impl OnDemand { if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) }, Pending::Code(req, mut sender) => if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) }, + Pending::TxProof(req, mut sender) => + if !check_hangup(&mut sender) { self.dispatch_transaction_proof(ctx, req, sender) } } } } @@ -596,6 +645,36 @@ impl Handler for OnDemand { } } + fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) { + let peer = ctx.peer(); + let req = match self.pending_requests.write().remove(&req_id) { + Some(req) => req, + None => return, + }; + + match req { + Pending::TxProof(req, sender) => { + match req.check_response(items) { + ProvedExecution::Complete(executed) => { + sender.complete(Ok(executed)); + return + } + ProvedExecution::Failed(err) => { + sender.complete(Err(err)); + return + } + ProvedExecution::BadProof => { + warn!("Error handling response for transaction proof request"); + ctx.disable_peer(peer); + } + } + + self.dispatch_transaction_proof(ctx.as_basic(), req, sender); + } + _ => panic!("Only transaction proof request dispatches transaction proof requests; qed"), + } + } + fn tick(&self, ctx: &BasicContext) { self.dispatch_orphaned(ctx) } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 3964137d9..3a72db51d 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -16,12 +16,18 @@ //! Request types, verification, and verification errors. +use std::sync::Arc; + use ethcore::basic_account::BasicAccount; use ethcore::encoded; +use ethcore::engines::Engine; +use ethcore::env_info::EnvInfo; use ethcore::receipt::Receipt; +use ethcore::state::{self, ProvedExecution}; +use ethcore::transaction::SignedTransaction; use rlp::{RlpStream, Stream, UntrustedRlp, View}; -use util::{Address, Bytes, HashDB, H256, U256}; +use util::{Address, Bytes, DBValue, HashDB, H256, U256}; use util::memorydb::MemoryDB; use util::sha3::Hashable; use util::trie::{Trie, TrieDB, TrieError}; @@ -231,6 +237,33 @@ impl Code { } } +/// Request for transaction execution, along with the parts necessary to verify the proof. +pub struct TransactionProof { + /// The transaction to request proof of. + pub tx: SignedTransaction, + /// Block header. + pub header: encoded::Header, + /// Transaction environment info. + pub env_info: EnvInfo, + /// Consensus engine. + pub engine: Arc, +} + +impl TransactionProof { + /// Check the proof, returning the proved execution or indicate that the proof was bad. 
+ pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { + let root = self.header.state_root(); + + state::check_proof( + state_items, + root, + &self.tx, + &*self.engine, + &self.env_info, + ) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 21936e1c5..8692e831a 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1616,7 +1616,6 @@ impl ::client::ProvingBlockChainClient for Client { let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false }; let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options); - match res { Err(ExecutionError::Internal(_)) => return None, _ => return Some(state.drop().1.extract_proof()), diff --git a/ethcore/src/env_info.rs b/ethcore/src/env_info.rs index 9e1bb6a40..cc42008d5 100644 --- a/ethcore/src/env_info.rs +++ b/ethcore/src/env_info.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Environment information for transaction execution. + use std::cmp; use std::sync::Arc; use util::{U256, Address, H256, Hashable}; @@ -25,7 +27,7 @@ use ethjson; pub type LastHashes = Vec; /// Information concerning the execution environment for a message-call/contract-creation. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EnvInfo { /// The block number. pub number: BlockNumber, diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 15c5834cd..ea7de9e61 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -79,7 +79,6 @@ //! cargo build --release //! ``` - extern crate ethcore_io as io; extern crate rustc_serialize; extern crate crypto; @@ -140,12 +139,12 @@ pub mod action_params; pub mod db; pub mod verification; pub mod state; +pub mod env_info; #[macro_use] pub mod evm; mod cache_manager; mod blooms; mod basic_types; -mod env_info; mod pod_account; mod state_db; mod account_db; diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 7aff83c14..3c5a3bc09 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; +use types::executed::{Executed, ExecutionError}; use types::state_diff::StateDiff; use transaction::SignedTransaction; use state_db::StateDB; @@ -60,6 +61,17 @@ pub struct ApplyOutcome { /// Result type for the execution ("application") of a transaction. pub type ApplyResult = Result; +/// Return type of proof validity check. +#[derive(Debug, Clone)] +pub enum ProvedExecution { + /// Proof wasn't enough to complete execution. + BadProof, + /// The transaction failed, but not due to a bad proof. + Failed(ExecutionError), + /// The transaction successfully completd with the given proof. + Complete(Executed), +} + #[derive(Eq, PartialEq, Clone, Copy, Debug)] /// Account modification state. Used to check if the account was /// Modified in between commits and overall. @@ -150,6 +162,39 @@ impl AccountEntry { } } +/// Check the given proof of execution. +/// `Err(ExecutionError::Internal)` indicates failure, everything else indicates +/// a successful proof (as the transaction itself may be poorly chosen). 
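// Review note (usage sketch mirroring the `on_transaction_proof` handler earlier in this
// patch, not part of this commit): a caller distinguishes the three outcomes roughly
// like so:
//
//     match check_proof(&items, header.state_root(), &tx, &*engine, &env_info) {
//         ProvedExecution::Complete(executed) => { /* use executed.output, gas_used, ... */ }
//         ProvedExecution::Failed(err) => { /* the transaction failed; the proof itself was fine */ }
//         ProvedExecution::BadProof => { /* nodes missing or inconsistent; penalize the peer */ }
//     }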
+pub fn check_proof( + proof: &[::util::DBValue], + root: H256, + transaction: &SignedTransaction, + engine: &Engine, + env_info: &EnvInfo, +) -> ProvedExecution { + let backend = self::backend::ProofCheck::new(proof); + let mut factories = Factories::default(); + factories.accountdb = ::account_db::Factory::Plain; + + let res = State::from_existing( + backend, + root, + engine.account_start_nonce(), + factories + ); + + let mut state = match res { + Ok(state) => state, + Err(_) => return ProvedExecution::BadProof, + }; + + match state.execute(env_info, engine, transaction, false) { + Ok(executed) => ProvedExecution::Complete(executed), + Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, + Err(e) => ProvedExecution::Failed(e), + } +} + /// Representation of the entire state of all accounts in the system. /// /// `State` can work together with `StateDB` to share account cache. @@ -548,16 +593,12 @@ impl State { Ok(()) } - /// Execute a given transaction. + /// Execute a given transaction, producing a receipt and an optional trace. /// This will change the state accordingly. pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { // let old = self.to_pod(); - let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; - let vm_factory = self.factories.vm.clone(); - let e = Executive::new(self, env_info, engine, &vm_factory).transact(t, options)?; - - // TODO uncomment once to_pod() works correctly. + let e = self.execute(env_info, engine, t, tracing)?; // trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod())); let state_root = if env_info.number < engine.params().eip98_transition { self.commit()?; @@ -570,6 +611,15 @@ impl State { Ok(ApplyOutcome{receipt: receipt, trace: e.trace}) } + // Execute a given transaction. + fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> Result { + let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; + let vm_factory = self.factories.vm.clone(); + + Executive::new(self, env_info, engine, &vm_factory).transact(t, options) + } + + /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. 
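// Review note: `execute` is factored out of `apply` above precisely so that `check_proof`
// can replay a transaction against a `ProofCheck` backend and inspect the raw `Executed`
// result without committing a state root or producing a receipt; `apply` now just layers
// the commit and receipt bookkeeping on top of the same call.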
#[cfg_attr(feature="dev", allow(match_ref_pats))] From 645011427ab9b1b9a08a31ecf0852c72d370413c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2017 13:48:56 +0100 Subject: [PATCH 12/89] most of proved_execution rpc --- rpc/src/v1/impls/eth.rs | 14 +++++---- rpc/src/v1/impls/light/eth.rs | 59 ++++++++++++++++++++++++++++------- rpc/src/v1/traits/eth.rs | 8 ++--- 3 files changed, 59 insertions(+), 22 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index f47ab2055..cf8bdbbe1 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -627,26 +627,28 @@ impl Eth for EthClient where self.send_raw_transaction(raw) } - fn call(&self, request: CallRequest, num: Trailing) -> Result { + fn call(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); let signed = self.sign_call(request)?; let result = match num.0 { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weakf!(self.client).call(&signed, num.into(), Default::default()), }; - result + future::done(result .map(|b| b.output.into()) .map_err(errors::from_call_error) + ).boxed() } - fn estimate_gas(&self, request: CallRequest, num: Trailing) -> Result { + fn estimate_gas(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); let signed = self.sign_call(request)?; - take_weak!(self.client).estimate_gas(&signed, num.0.into()) + future::done(take_weakf!(self.client).estimate_gas(&signed, num.0.into()) .map(Into::into) .map_err(errors::from_call_error) + ).boxed() } fn compile_lll(&self, _: String) -> Result { diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 2e129d31e..fdd5e193b 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -38,7 +38,7 @@ use rlp::{UntrustedRlp, View}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use util::{RwLock, U256}; -use futures::{future, Future, BoxFuture}; +use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; @@ -153,6 +153,27 @@ impl EthClient { .unwrap_or_else(|| future::err(err_no_context()).boxed()) }).boxed() } + + // helper for getting proved execution. 
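// Review note: the helper below is an intermediate draft. It still refers to a `request`
// binding that does not exist (the parameter is `req`) and bottoms out in
// `unimplemented!()`; the next patch rewrites it wholesale, resolving the missing call
// fields (nonce via an on-demand account lookup, gas price via the sampled corpus)
// before fake-signing a transaction and requesting proof of its execution.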
+ fn proved_execution(&self, req: CallRequest, num: Trailing) -> Result, Error> { + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + let req: CRequest = req.into(); + let id = num.0.into(); + + let from = request.from.unwrap_or(Address::zero()); + let action = request.to.map_or(Action::Create, Action::Call); + let gas: request.gas.unwrap_or(U256::from(10_000_000)); + let value = request.value.unwrap_or_else(U256::zero); + let data = request.data.map_or_else(Vec::new, |d| d.to_vec()); + + sync.with_context(|ctx| { + let nonce_fut = req.nonce.map(Some).ok_or(err_no_context()) + .or_else(|_| self.account(from, id).map(|acc| acc.map(|a| a.nonce))); + + let gas_price_fut = req.gas_price.map(Some).ok_or(err_no_context()) + .or_else(|_| unimplemented!()) + }) + } } impl Eth for EthClient { @@ -328,12 +349,25 @@ impl Eth for EthClient { self.send_raw_transaction(raw) } - fn call(&self, req: CallRequest, num: Trailing) -> Result { - Err(errors::unimplemented(None)) + fn call(&self, req: CallRequest, num: Trailing) -> BoxFuture { + self.proved_execution().and_then(|res| { + match res { + Ok(Some(exec)) => Ok(exec.output.into()), + Ok(None) => Err(errors::unknown_block()), + Err(e) => Err(errors::execution(e)), + } + }).boxed() } - fn estimate_gas(&self, req: CallRequest, num: Trailing) -> Result { - Err(errors::unimplemented(None)) + fn estimate_gas(&self, req: CallRequest, num: Trailing) -> BoxFuture { + // TODO: binary chop for more accurate estimates. + self.proved_execution().and_then(|res| { + match res { + Ok(Some(exec)) => Ok((exec.refunded + exec.gas_used).into()), + Ok(None) => Err(errors::unknown_block()), + Err(e) => Err(errors::execution(e)), + } + }).boxed() } fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { @@ -361,19 +395,20 @@ impl Eth for EthClient { } fn compilers(&self) -> Result, Error> { - Err(errors::unimplemented(None)) + Err(errors::deprecated("Compilation functionality is deprecated.".to_string())) + } - fn compile_lll(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_lll(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string())) } - fn compile_solidity(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_serpent(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string())) } - fn compile_serpent(&self, _code: String) -> Result { - Err(errors::unimplemented(None)) + fn compile_solidity(&self, _: String) -> Result { + Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string())) } fn logs(&self, _filter: Filter) -> Result, Error> { diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index eaf608c60..365ad9320 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -110,12 +110,12 @@ build_rpc_trait! { fn submit_transaction(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[rpc(name = "eth_call")] - fn call(&self, CallRequest, Trailing) -> Result; + #[rpc(async, name = "eth_call")] + fn call(&self, CallRequest, Trailing) -> BoxFuture; /// Estimate gas needed for execution of given contract. - #[rpc(name = "eth_estimateGas")] - fn estimate_gas(&self, CallRequest, Trailing) -> Result; + #[rpc(async, name = "eth_estimateGas")] + fn estimate_gas(&self, CallRequest, Trailing) -> BoxFuture; /// Get transaction by its hash. 
#[rpc(name = "eth_getTransactionByHash")] From af235e564ebadf5e2039b65baa2c080b455ddbe9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 26 Feb 2017 15:05:33 +0100 Subject: [PATCH 13/89] proved execution future --- ethcore/light/src/client/mod.rs | 24 +++++--- rpc/src/v1/helpers/dispatch.rs | 99 +++++++++++++++++++------------ rpc/src/v1/impls/eth.rs | 12 +++- rpc/src/v1/impls/light/eth.rs | 102 +++++++++++++++++++++++++------- 4 files changed, 166 insertions(+), 71 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 4a4da2917..2872e0eec 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -230,22 +230,32 @@ impl Client { } /// Get a handle to the verification engine. - pub fn engine(&self) -> &Engine { - &*self.engine + pub fn engine(&self) -> &Arc { + &self.engine } - fn latest_env_info(&self) -> EnvInfo { - let header = self.best_block_header(); + /// Get the latest environment info. + pub fn latest_env_info(&self) -> EnvInfo { + self.env_info(BlockId::Latest) + .expect("Best block header and recent hashes always stored; qed") + } - EnvInfo { + /// Get environment info for a given block. + pub fn env_info(&self, id: BlockId) -> Option { + let header = match self.block_header(id) { + Some(hdr) => hdr, + None => return None, + }; + + Some(EnvInfo { number: header.number(), author: header.author(), timestamp: header.timestamp(), difficulty: header.difficulty(), - last_hashes: self.build_last_hashes(header.hash()), + last_hashes: self.build_last_hashes(header.parent_hash()), gas_used: Default::default(), gas_limit: header.gas_limit(), - } + }) } fn build_last_hashes(&self, mut parent_hash: H256) -> Arc> { diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 0bea7f9a1..b11ada048 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -158,6 +158,54 @@ impl Dispatcher for FullDispatcher, + client: Arc, + on_demand: Arc, + cache: Arc>, +) -> BoxFuture, Error> { + const GAS_PRICE_SAMPLE_SIZE: usize = 100; + + if let Some(cached) = cache.lock().gas_price_corpus() { + return future::ok(cached).boxed() + } + + let cache = cache.clone(); + let eventual_corpus = sync.with_context(|ctx| { + // get some recent headers with gas used, + // and request each of the blocks from the network. + let block_futures = client.ancestry_iter(BlockId::Latest) + .filter(|hdr| hdr.gas_used() != U256::default()) + .take(GAS_PRICE_SAMPLE_SIZE) + .map(request::Body::new) + .map(|req| on_demand.block(ctx, req)); + + // as the blocks come in, collect gas prices into a vector + stream::futures_unordered(block_futures) + .fold(Vec::new(), |mut v, block| { + for t in block.transaction_views().iter() { + v.push(t.gas_price()) + } + + future::ok(v) + }) + .map(move |v| { + // produce a corpus from the vector, cache it, and return + // the median as the intended gas price. + let corpus: ::stats::Corpus<_> = v.into(); + cache.lock().set_gas_price_corpus(corpus.clone()); + corpus + }) + }); + + match eventual_corpus { + Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } +} + /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. /// Light client `ETH` RPC. #[derive(Clone)] @@ -197,44 +245,12 @@ impl LightDispatcher { /// Get a recent gas price corpus. // TODO: this could be `impl Trait`. 
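// Review note: `fetch_gas_price_corpus`, now a free function (see above) so the light
// Eth RPC can reuse it, samples up to 100 recent blocks with non-zero gas used, fetches
// their bodies on demand, folds every transaction's gas price into a `stats::Corpus`,
// caches the corpus, and lets callers take its median as the default gas price.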
pub fn gas_price_corpus(&self) -> BoxFuture, Error> { - const GAS_PRICE_SAMPLE_SIZE: usize = 100; - - if let Some(cached) = self.cache.lock().gas_price_corpus() { - return future::ok(cached).boxed() - } - - let cache = self.cache.clone(); - let eventual_corpus = self.sync.with_context(|ctx| { - // get some recent headers with gas used, - // and request each of the blocks from the network. - let block_futures = self.client.ancestry_iter(BlockId::Latest) - .filter(|hdr| hdr.gas_used() != U256::default()) - .take(GAS_PRICE_SAMPLE_SIZE) - .map(request::Body::new) - .map(|req| self.on_demand.block(ctx, req)); - - // as the blocks come in, collect gas prices into a vector - stream::futures_unordered(block_futures) - .fold(Vec::new(), |mut v, block| { - for t in block.transaction_views().iter() { - v.push(t.gas_price()) - } - - future::ok(v) - }) - .map(move |v| { - // produce a corpus from the vector, cache it, and return - // the median as the intended gas price. - let corpus: ::stats::Corpus<_> = v.into(); - cache.lock().set_gas_price_corpus(corpus.clone()); - corpus - }) - }); - - match eventual_corpus { - Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), - None => future::err(errors::network_disabled()).boxed(), - } + fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ) } /// Get an account's next nonce. @@ -285,7 +301,12 @@ impl Dispatcher for LightDispatcher { // fast path for known gas price. match request_gas_price { Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(), - None => self.gas_price_corpus().and_then(|corp| match corp.median() { + None => fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone() + ).and_then(|corp| match corp.median() { Some(median) => future::ok(*median), None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error. 
}).map(with_gas_price).boxed() diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index cf8bdbbe1..47143ac75 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -629,10 +629,13 @@ impl Eth for EthClient where fn call(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = self.sign_call(request)?; + let signed = match self.sign_call(request) { + Ok(signed) => signed, + Err(e) => return future::err(e).boxed(), + }; let result = match num.0 { - BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + BlockNumber::Pending => take_weakf!(self.miner).call(&*take_weakf!(self.client), &signed, Default::default()), num => take_weakf!(self.client).call(&signed, num.into(), Default::default()), }; @@ -644,7 +647,10 @@ impl Eth for EthClient where fn estimate_gas(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = self.sign_call(request)?; + let signed = match self.sign_call(request) { + Ok(signed) => signed, + Err(e) => return future::err(e).boxed(), + }; future::done(take_weakf!(self.client).estimate_gas(&signed, num.0.into()) .map(Into::into) .map_err(errors::from_call_error) diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index f6be478fa..f889faf00 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -24,6 +24,7 @@ use std::sync::Arc; use jsonrpc_core::Error; use jsonrpc_macros::Trailing; +use light::cache::Cache as LightDataCache; use light::client::Client as LightClient; use light::{cht, TransactionQueue}; use light::on_demand::{request, OnDemand}; @@ -31,17 +32,18 @@ use light::on_demand::{request, OnDemand}; use ethcore::account_provider::{AccountProvider, DappId}; use ethcore::basic_account::BasicAccount; use ethcore::encoded; +use ethcore::executed::{Executed, ExecutionError}; use ethcore::ids::BlockId; -use ethcore::transaction::SignedTransaction; +use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethsync::LightSync; use rlp::{UntrustedRlp, View}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use util::{RwLock, U256}; +use util::{RwLock, Mutex, FixedHash, Uint, U256}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; -use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; +use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; use v1::helpers::block_import::is_major_importing; use v1::traits::Eth; use v1::types::{ @@ -60,6 +62,7 @@ pub struct EthClient { on_demand: Arc, transaction_queue: Arc>, accounts: Arc, + cache: Arc>, } // helper for internal error: on demand sender cancelled. @@ -67,6 +70,8 @@ fn err_premature_cancel(_cancel: oneshot::Canceled) -> Error { errors::internal("on-demand sender prematurely cancelled", "") } +type ExecutionResult = Result; + impl EthClient { /// Create a new `EthClient` with a handle to the light sync instance, client, /// and on-demand request service, which is assumed to be attached as a handler. @@ -76,6 +81,7 @@ impl EthClient { on_demand: Arc, transaction_queue: Arc>, accounts: Arc, + cache: Arc>, ) -> Self { EthClient { sync: sync, @@ -83,6 +89,7 @@ impl EthClient { on_demand: on_demand, transaction_queue: transaction_queue, accounts: accounts, + cache: cache, } } @@ -149,24 +156,77 @@ impl EthClient { } // helper for getting proved execution. 
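// Review note: the rewritten helper below runs as a single future pipeline:
//   1. resolve the nonce (from the request, otherwise an on-demand account lookup) and
//      the gas price (from the request, otherwise the corpus median, otherwise
//      DEFAULT_GAS_PRICE);
//   2. fake-sign an `EthTransaction` for the caller's address with those fields filled;
//   3. join with the header lookup, build `request::TransactionProof { tx, header,
//      env_info, engine }`, and hand it to `on_demand.transaction_proof` inside the
//      sync context.
// A missing block surfaces as `unknown_block`, and a missing network context as
// `network_disabled`.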
- fn proved_execution(&self, req: CallRequest, num: Trailing) -> Result, Error> { - let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + fn proved_execution(&self, req: CallRequest, num: Trailing) -> BoxFuture { + const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); + + + let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone()); let req: CRequest = req.into(); let id = num.0.into(); - let from = request.from.unwrap_or(Address::zero()); - let action = request.to.map_or(Action::Create, Action::Call); - let gas: request.gas.unwrap_or(U256::from(10_000_000)); - let value = request.value.unwrap_or_else(U256::zero); - let data = request.data.map_or_else(Vec::new, |d| d.to_vec()); + let from = req.from.unwrap_or(Address::zero()); + let nonce_fut = match req.nonce { + Some(nonce) => future::ok(Some(nonce)).boxed(), + None => self.account(from, id).map(|acc| acc.map(|a| a.nonce)).boxed(), + }; - sync.with_context(|ctx| { - let nonce_fut = req.nonce.map(Some).ok_or(err_no_context()) - .or_else(|_| self.account(from, id).map(|acc| acc.map(|a| a.nonce))); + let gas_price_fut = match req.gas_price { + Some(price) => future::ok(price).boxed(), + None => dispatch::fetch_gas_price_corpus( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + ).map(|corp| match corp.median() { + Some(median) => *median, + None => DEFAULT_GAS_PRICE, + }).boxed() + }; - let gas_price_fut = req.gas_price.map(Some).ok_or(err_no_context()) - .or_else(|_| unimplemented!()) - }) + // if nonce resolves, this should too since it'll be in the LRU-cache. + let header_fut = self.header(id); + + // fetch missing transaction fields from the network. + nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { + let action = req.to.map_or(Action::Create, Action::Call); + let gas = req.gas.unwrap_or(U256::from(10_000_000)); // better gas amount? + let value = req.value.unwrap_or_else(U256::zero); + let data = req.data.map_or_else(Vec::new, |d| d.to_vec()); + + future::done(match nonce { + Some(n) => Ok(EthTransaction { + nonce: n, + action: action, + gas: gas, + gas_price: gas_price, + value: value, + data: data, + }.fake_sign(from)), + None => Err(errors::unknown_block()), + }) + }).join(header_fut).and_then(move |(tx, hdr)| { + // then request proved execution. + // TODO: get last-hashes from network. + let (env_info, hdr) = match (client.env_info(id), hdr) { + (Some(env_info), Some(hdr)) => (env_info, hdr), + _ => return future::err(errors::unknown_block()).boxed(), + }; + let request = request::TransactionProof { + tx: tx, + header: hdr, + env_info: env_info, + engine: client.engine().clone(), + }; + + let proved_future = sync.with_context(move |ctx| { + on_demand.transaction_proof(ctx, request).map_err(err_premature_cancel).boxed() + }); + + match proved_future { + Some(fut) => fut.boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).boxed() } } @@ -344,10 +404,9 @@ impl Eth for EthClient { } fn call(&self, req: CallRequest, num: Trailing) -> BoxFuture { - self.proved_execution().and_then(|res| { + self.proved_execution(req, num).and_then(|res| { match res { - Ok(Some(exec)) => Ok(exec.output.into()), - Ok(None) => Err(errors::unknown_block()), + Ok(exec) => Ok(exec.output.into()), Err(e) => Err(errors::execution(e)), } }).boxed() @@ -355,10 +414,9 @@ impl Eth for EthClient { fn estimate_gas(&self, req: CallRequest, num: Trailing) -> BoxFuture { // TODO: binary chop for more accurate estimates. 
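// Review note: returning `gas_used + refunded` presumably reflects that refunds are only
// applied after execution, so the supplied gas limit has to cover the pre-refund
// consumption; it is a cheap single-shot estimate. A binary chop over repeated proved
// executions (as the full client's `estimate_gas` does) would be tighter, but each probe
// would cost another network round trip on the light client.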
- self.proved_execution().and_then(|res| { + self.proved_execution(req, num).and_then(|res| { match res { - Ok(Some(exec)) => Ok((exec.refunded + exec.gas_used).into()), - Ok(None) => Err(errors::unknown_block()), + Ok(exec) => Ok((exec.refunded + exec.gas_used).into()), Err(e) => Err(errors::execution(e)), } }).boxed() From bbb50caa893de2ee126af16f1f4e36b3c9f0bdf7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 3 Mar 2017 19:25:29 +0100 Subject: [PATCH 14/89] initial request definitions --- ethcore/light/src/lib.rs | 2 +- ethcore/light/src/types/les_request.rs | 228 -------- ethcore/light/src/types/mod.rs.in | 2 +- ethcore/light/src/types/request.rs | 707 +++++++++++++++++++++++++ 4 files changed, 709 insertions(+), 230 deletions(-) delete mode 100644 ethcore/light/src/types/les_request.rs create mode 100644 ethcore/light/src/types/request.rs diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index b6e06a02b..ebf5f4f08 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -57,7 +57,7 @@ mod types; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; -pub use types::les_request as request; +pub use types::request as request; #[macro_use] extern crate log; diff --git a/ethcore/light/src/types/les_request.rs b/ethcore/light/src/types/les_request.rs deleted file mode 100644 index dbff19eb5..000000000 --- a/ethcore/light/src/types/les_request.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! LES request types. - -use ethcore::transaction::Action; -use util::{Address, H256, U256, Uint}; - -/// Either a hash or a number. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum HashOrNumber { - /// Block hash variant. - Hash(H256), - /// Block number variant. - Number(u64), -} - -impl From for HashOrNumber { - fn from(hash: H256) -> Self { - HashOrNumber::Hash(hash) - } -} - -impl From for HashOrNumber { - fn from(num: u64) -> Self { - HashOrNumber::Number(num) - } -} - -/// A request for block headers. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Headers { - /// Starting block number or hash. - pub start: HashOrNumber, - /// The maximum amount of headers which can be returned. - pub max: usize, - /// The amount of headers to skip between each response entry. - pub skip: u64, - /// Whether the headers should proceed in falling number from the initial block. - pub reverse: bool, -} - -/// A request for specific block bodies. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Bodies { - /// Hashes which bodies are being requested for. - pub block_hashes: Vec -} - -/// A request for transaction receipts. -/// -/// This request is answered with a list of transaction receipts for each block -/// requested. 
-#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct Receipts { - /// Block hashes to return receipts for. - pub block_hashes: Vec, -} - -/// A request for a state proof -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProof { - /// Block hash to query state from. - pub block: H256, - /// Key of the state trie -- corresponds to account hash. - pub key1: H256, - /// Key in that account's storage trie; if empty, then the account RLP should be - /// returned. - pub key2: Option, - /// if greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, // could even safely be u8; trie w/ 32-byte key can be at most 64-levels deep. -} - -/// A request for state proofs. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct StateProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCode { - /// Block hash - pub block_hash: H256, - /// Account key (== sha3(address)) - pub account_key: H256, -} - -/// A request for contract code. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct ContractCodes { - /// Block hash and account key (== sha3(address)) pairs to fetch code for. - pub code_requests: Vec, -} - -/// A request for a header proof from the Canonical Hash Trie. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProof { - /// Number of the CHT. - pub cht_number: u64, - /// Block number requested. May not be 0: genesis isn't included in any CHT. - pub block_number: u64, - /// If greater than zero, trie nodes beyond this level may be omitted. - pub from_level: u32, -} - -/// A request for header proofs from the CHT. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct HeaderProofs { - /// All the proof requests. - pub requests: Vec, -} - -/// A request for proof of (simulated) transaction execution. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub struct TransactionProof { - /// Block hash to request for. - pub at: H256, - /// Address to treat as the caller. - pub from: Address, - /// Action to take: either a call or a create. - pub action: Action, - /// Amount of gas to request proof-of-execution for. - pub gas: U256, - /// Price for each gas. - pub gas_price: U256, - /// Value to simulate sending. - pub value: U256, - /// Transaction data. - pub data: Vec, -} - -/// Kinds of requests. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Kind { - /// Requesting headers. - Headers, - /// Requesting block bodies. - Bodies, - /// Requesting transaction receipts. - Receipts, - /// Requesting proofs of state trie nodes. - StateProofs, - /// Requesting contract code by hash. - Codes, - /// Requesting header proofs (from the CHT). - HeaderProofs, - /// Requesting proof of transaction execution. - TransactionProof, -} - -/// Encompasses all possible types of requests in a single structure. -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "ipc", binary)] -pub enum Request { - /// Requesting headers. - Headers(Headers), - /// Requesting block bodies. - Bodies(Bodies), - /// Requesting transaction receipts. - Receipts(Receipts), - /// Requesting state proofs. - StateProofs(StateProofs), - /// Requesting contract codes. 
- Codes(ContractCodes), - /// Requesting header proofs. - HeaderProofs(HeaderProofs), - /// Requesting proof of transaction execution. - TransactionProof(TransactionProof), -} - -impl Request { - /// Get the kind of request this is. - pub fn kind(&self) -> Kind { - match *self { - Request::Headers(_) => Kind::Headers, - Request::Bodies(_) => Kind::Bodies, - Request::Receipts(_) => Kind::Receipts, - Request::StateProofs(_) => Kind::StateProofs, - Request::Codes(_) => Kind::Codes, - Request::HeaderProofs(_) => Kind::HeaderProofs, - Request::TransactionProof(_) => Kind::TransactionProof, - } - } - - /// Get the amount of requests being made. - /// In the case of `TransactionProof`, this is the amount of gas being requested. - pub fn amount(&self) -> usize { - match *self { - Request::Headers(ref req) => req.max, - Request::Bodies(ref req) => req.block_hashes.len(), - Request::Receipts(ref req) => req.block_hashes.len(), - Request::StateProofs(ref req) => req.requests.len(), - Request::Codes(ref req) => req.code_requests.len(), - Request::HeaderProofs(ref req) => req.requests.len(), - Request::TransactionProof(ref req) => match req.gas > usize::max_value().into() { - true => usize::max_value(), - false => req.gas.low_u64() as usize, - } - } - } -} diff --git a/ethcore/light/src/types/mod.rs.in b/ethcore/light/src/types/mod.rs.in index 0adfbf0e4..eba551b53 100644 --- a/ethcore/light/src/types/mod.rs.in +++ b/ethcore/light/src/types/mod.rs.in @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -pub mod les_request; \ No newline at end of file +pub mod request; diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs new file mode 100644 index 000000000..279296cf8 --- /dev/null +++ b/ethcore/light/src/types/request.rs @@ -0,0 +1,707 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light protocol request types. + +use std::collections::HashMap; + +use ethcore::transaction::Action; +use util::{Address, H256, U256, Uint}; + +// re-exports of request types. 
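// Review note: every request now comes in an `Incomplete` form, whose fields may be
// `Field::BackReference(request_index, output_index)` pointers at outputs of earlier
// requests in the same packet, and a `Complete` form holding only scalars;
// `check_outputs`, `note_outputs` and `fill` tie the two together. A hypothetical
// chained pair, for illustration only:
//
//     // request 0 proves header 1_000_000 from the CHT; its hash is noted as output 1.
//     let header_proof = IncompleteHeaderProofRequest { num: 1_000_000u64.into() };
//     // request 1 asks for the body of whichever hash request 0 ends up producing.
//     let body = IncompleteBodyRequest { hash: Field::BackReference(0, 1) };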
+pub use self::header::{ + Complete as CompleteHeadersRequest, + Incomplete as IncompleteHeadersRequest, + Response as HeadersResponse +}; +pub use self::header_proof::{ + Complete as CompleteHeaderProofRequest, + Incomplete as IncompleteHeaderProofRequest, + Response as HeaderProofResponse +}; +pub use self::block_body::{ + Complete as CompleteBodyRequest, + Incomplete as IncompleteBodyRequest, + Response as BodyResponse +}; +pub use self::receipts::{ + Complete as CompleteReceiptsRequest, + Incomplete as IncompleteReceiptsRequest + Response as ReceiptsResponse +}; +pub use self::account::{ + Complete as CompleteAccountRequest, + Incomplete as IncompleteAccountRequest, + Response as AccountResponse, +}; +pub use self::storage::{ + Complete as CompleteStorageRequest, + Incomplete as IncompleteStorageRequest, + Response as StorageResponse +}; +pub use self::contract_code::{ + Complete as CompleteCodeRequest, + Incomplete as IncompleteCodeRequest, + Response as CodeResponse, +}; + +/// Error indicating a reference to a non-existent or wrongly-typed output. +pub struct NoSuchOutput; + +/// An input to a request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Field { + /// A pre-specified input. + Scalar(T), + /// An input which can be resolved later on. + /// (Request index, output index) + BackReference(usize, usize), +} + +impl From for Field { + fn from(val: T) -> Self { + Field::Scalar(val) + } +} + +/// Request outputs which can be reused as inputs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Output { + /// A 32-byte hash output. + Hash(H256), + /// An unsigned-integer output. + Number(u64), +} + +/// Response output kinds which can be used as back-references. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OutputKind { + /// A 32-byte hash output. + Hash, + /// An unsigned-integer output. + Number, +} + +/// Either a hash or a number. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "ipc", binary)] +pub enum HashOrNumber { + /// Block hash variant. + Hash(H256), + /// Block number variant. + Number(u64), +} + +impl From for HashOrNumber { + fn from(hash: H256) -> Self { + HashOrNumber::Hash(hash) + } +} + +impl From for HashOrNumber { + fn from(num: u64) -> Self { + HashOrNumber::Number(num) + } +} + +/// A potentially incomplete request. +pub trait IncompleteRequest: Sized { + type Complete; + + /// Check prior outputs against the needed inputs. + /// + /// This is called to ensure consistency of this request with + /// others in the same packet. + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>; + + /// Note that this request will produce the following outputs. + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); + + /// Fill the request. + /// + /// This function is provided an "output oracle" which allows fetching of + /// prior request outputs. + /// Only outputs previously checked with `check_outputs` will be available. + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result; +} + +/// Header request. +pub mod header { + use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::U256; + + /// Potentially incomplete headers request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Start block. + pub start: Field, + /// Skip between. + pub skip: U256, + /// Maximum to return. + pub max: U256, + /// Whether to reverse from start. 
+ pub reverse: bool, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.start { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => + f(req, idx, OutputKind::Hash).or_else(|| f(req, idx, OutputKind::Number)) + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let start = match self.start { + Field::Scalar(start) => start, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash.into(), + Output::Number(num) => num.into(), + } + }; + + Ok(Complete { + start: start, + skip: self.skip, + max: self.max, + reverse: self.reverse, + }) + } + + } + + /// A complete header request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Start block. + pub start: HashOrNumber, + /// Skip between. + pub skip: U256, + /// Maximum to return. + pub max: U256, + /// Whether to reverse from start. + pub reverse: bool, + } + + /// The output of a request for headers. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + header: Vec, + } + + impl Response { + /// Fill reusable outputs by writing them into the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } + } +} + +/// Request and response for header proofs. +pub mod header_proof { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block number. + pub num: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Number), + } + } + + fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { + note(1, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let num = match self.num { + Field::Scalar(num) => num, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Number(num) => num, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + num: num, + }) + } + + } + + /// A complete header proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get a header proof for. + pub num: u64, + } + + /// The output of a request for a header proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion proof of the header and total difficulty in the CHT. + pub proof: Vec, + /// The proved header's hash. + pub hash: H256, + /// The proved header's total difficulty. + pub td: U256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(1, Output::Hash(self.hash)); + } + } +} + +/// Request and response for block receipts +pub mod block_receipts { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete block receipts request. 
+ #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get receipts for. + pub hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let hash = match self.hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + hash: hash, + }) + } + + } + + /// A complete block receipts request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The number to get block receipts for. + pub hash: H256, + } + + /// The output of a request for block receipts. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block receipts. + pub receipts: Vec + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} + +/// Request and response for a block body +pub mod block_body { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to get receipts for. + pub hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match self.num { + Field::Scalar(_) => Ok(()), + Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), + } + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let hash = match self.hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput), + } + }; + + Ok(Complete { + hash: hash, + }) + } + + } + + /// A complete block body request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The hash to get a block body for. + pub hash: H256, + } + + /// The output of a request for block body. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The block body. + pub body: encoded::Body, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} + +/// A request for an account proof. +pub mod account { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete request for an account proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. 
+ pub address_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + f(1, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let address_hash = match self.address_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + address_hash: address_hash, + }) + } + + } + + /// A complete request for an account. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Account nonce. + pub nonce: U256, + /// Account balance. + pub balance: U256, + /// Account's code hash. + pub code_hash: H256, + /// Account's storage trie root. + pub storage_root: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.code_hash)); + f(1, Output::Hash(self.storage_root)); + } + } +} + +/// A request for a storage proof. +pub mod storage { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete request for an storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// Block hash to request state proof for. + pub block_hash: Field, + /// Hash of the account's address. + pub address_hash: Field, + /// Hash of the storage key. + pub key_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.address_hash { + f(req, idx, OutputKind::Hash)? + } + + if let Field::BackReference(req, idx) = self.key_hash { + f(req, idx, OutputKind::Hash)? + } + + Ok(()) + } + + fn note_outputs(&self, mut f: F) where F: FnMut(usize, OutputKind) { + f(0, OutputKind::Hash); + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? 
{ + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let address_hash = match self.address_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let key_hash = match self.key_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + address_hash: address_hash, + key_hash: key_hash + }) + } + + } + + /// A complete request for a storage proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// Block hash to request state proof for. + pub block_hash: H256, + /// Hash of the account's address. + pub address_hash: H256, + /// Storage key hash. + pub key_hash: H256, + } + + /// The output of a request for an account state proof. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// Inclusion/exclusion proof + pub proof: Vec, + /// Storage value. + pub value: H256, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + f(0, Output::Hash(self.value)); + } + } +} + +/// A request for contract code. +pub mod contract_code { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use util::{Bytes, U256, H256}; + + /// Potentially incomplete _ request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The code hash. + pub code_hash: Field, + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + if let Field::BackReference(req, idx) = self.code_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + let code_hash = match self.code_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + code_hash: code_hash, + }) + } + + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The code hash. + pub code_hash: H256, + } + + /// The output of a request for + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// The requested code. + pub code: Bytes, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
+ pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } +} From 41effadb94ed4f36624d6d1af53d22133e764e78 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 12:21:06 +0100 Subject: [PATCH 15/89] RLP encoding and decoding for requests --- ethcore/light/src/client/header_chain.rs | 1 - ethcore/light/src/lib.rs | 2 +- ethcore/light/src/types/request.rs | 265 +++++++++++++++++++++++ 3 files changed, 266 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 575938cd5..9dcd25888 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -24,7 +24,6 @@ //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. // TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers` -// use std::collections::{BTreeMap, HashMap}; diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ebf5f4f08..ada58d8de 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -26,7 +26,7 @@ //! use-cases like sending transactions from a personal account. //! //! The light client performs a header-only sync, doing verification and pruning -//! historical blocks. Upon pruning, batches of 2048 blocks have a number => hash +//! historical blocks. Upon pruning, batches of 2048 blocks have a number => (hash, TD) //! mapping sealed into "canonical hash tries" which can later be used to verify //! historical block queries from peers. diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 279296cf8..259f3def7 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use ethcore::transaction::Action; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Address, H256, U256, Uint}; // re-exports of request types. @@ -77,6 +78,32 @@ impl From for Field { } } +impl Decodable for Field { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? { + 0 => Ok(Field::Scalar(rlp.val_at::(1)?)), + 1 => Ok({ + let inner_rlp = rlp.at(1)?; + Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) + }) + _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), + } + } +} + +impl Encodable for Field { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2); + match *self { + Field::Scalar(ref data) => s.append(&0u8).append(data), + Field::BackReference(ref req, ref idx) => + s.append(&1u8).begin_list(2).append(req).append(idx), + }; + } +} + /// Request outputs which can be reused as inputs. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Output { @@ -117,6 +144,114 @@ impl From for HashOrNumber { } } +/// All request types, as they're sent over the network. +pub enum Request { + /// A request for block headers. + Headers(IncompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(IncompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(IncompleteReceiptsRequest), + /// A request for a block body. + Body(IncompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(IncompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(IncompleteStorageRequest), + /// A request for contract code. 
+ Code(IncompleteCodeRequest), + // Transaction proof. +} + +impl Request { + fn kind(&self) -> RequestKind { + match *self { + Request::Headers(_) => RequestKind::Headers, + Request::HeaderProof(_) => RequestKind::HeaderProof, + Request::Receipts(_) => RequestKind::Receipts, + Request::Body(_) => RequestKind::Body, + Request::Account(_) => RequestKind::Account, + Request::Storage(_) => RequestKind::Storage, + Request::Code(_) => RequestKind::Code, + } + } +} + +impl Decodable for Request { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? { + RequestKind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + RequestKind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + RequestKind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + RequestKind::Body => Ok(Request::Body(rlp.val_at(1)?)), + RequestKind::Account => Ok(Request::Account(rlp.val_at(1)?)), + RequestKind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + RequestKind::Code => Ok(Request::Code(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Request { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).append(&self.kind()); + + match *self { + Request::Headers(ref req) => s.append(req), + Request::HeaderProof(ref req) => s.append(req), + Request::Receipts(ref req) => s.append(req), + Request::Body(ref req) => s.append(req), + Request::Account(ref req) => s.append(req), + Request::Storage(ref req) => s.append(req), + Request::Code(ref req) => s.append(req), + }; + } +} + + +/// Kinds of requests. +/// Doubles as the "ID" field of the request. +#[repr(u8)] +pub enum RequestKind { + /// A request for headers. + Headers = 0, + HeaderProof = 1, + // TransactionIndex = 2, + Receipts = 3, + Body = 4, + Account = 5, + Storage = 6, + Code = 7, + // TransactionProof = 8, +} + +impl Decodable for RequestKind { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.as_val::()? { + 0 => Ok(RequestKind::Headers), + 1 => Ok(RequestKind::HeaderProof), + // 2 => Ok(RequestKind::TransactionIndex, + 3 => Ok(RequestKind::Receipts), + 4 => Ok(RequestKind::Body), + 5 => Ok(RequestKind::Account), + 6 => Ok(RequestKind::Storage), + 7 => Ok(RequestKind::Code), + // 8 => Ok(RequestKind::TransactionProof), + _ => Err(DecoderError::Custom("Unknown PIP request ID.")), + } + } +} + +impl Encodable for RequestKind { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(self as &u8); + } +} + /// A potentially incomplete request. pub trait IncompleteRequest: Sized { type Complete; @@ -144,6 +279,7 @@ pub trait IncompleteRequest: Sized { pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::U256; /// Potentially incomplete headers request. @@ -159,6 +295,28 @@ pub mod header { pub reverse: bool, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + start: rlp.val_at(0)?, + skip: rlp.val_at(1)?, + max: rlp.val_at(2)?, + reverse: rlp.val_at(3)? + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(4) + .append(&self.start) + .append(&self.skip) + .append(&self.max) + .append(&self.reverse); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -223,6 +381,7 @@ pub mod header { /// Request and response for header proofs. 
pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete header proof request. @@ -232,6 +391,21 @@ pub mod header_proof { pub num: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + num: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.num); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -295,6 +469,7 @@ pub mod header_proof { /// Request and response for block receipts pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete block receipts request. @@ -304,6 +479,21 @@ pub mod block_receipts { pub hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -360,6 +550,7 @@ pub mod block_receipts { pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete block body request. @@ -369,6 +560,21 @@ pub mod block_body { pub hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + hash: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(1).append(&self.hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -425,6 +631,7 @@ pub mod block_body { pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an account proof. @@ -436,6 +643,24 @@ pub mod account { pub address_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.address_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -522,6 +747,7 @@ pub mod account { pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an storage proof. 
@@ -535,6 +761,26 @@ pub mod storage { pub key_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address_hash: rlp.val_at(1)?, + key_hash: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.block_hash) + .append(&self.address_hash) + .append(&self.key_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; @@ -628,6 +874,7 @@ pub mod storage { pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; use util::{Bytes, U256, H256}; /// Potentially incomplete _ request. @@ -639,6 +886,24 @@ pub mod contract_code { pub code_hash: Field, } + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + code_hash: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.block_hash) + .append(&self.code_hash); + } + } + impl super::IncompleteRequest for Incomplete { type Complete = Complete; From 8e9faa416da13b70e9dbc751b6285925bce77382 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 17:03:58 +0100 Subject: [PATCH 16/89] proofs of non-existance in ProvingBlockChainClient --- ethcore/src/client/client.rs | 17 +++--------- ethcore/src/client/test_client.rs | 13 ++++----- ethcore/src/client/traits.rs | 12 +++------ ethcore/src/state/account.rs | 13 ++++----- ethcore/src/state/mod.rs | 45 +++++++++++++++---------------- 5 files changed, 40 insertions(+), 60 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 63be1da07..54e433a72 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1585,23 +1585,14 @@ impl MayPanic for Client { } impl ::client::ProvingBlockChainClient for Client { - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)> { self.state_at(id) - .and_then(move |state| state.prove_storage(key1, key2, from_level).ok()) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_storage(key1, key2).ok()) } - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec { + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, ::types::basic_account::BasicAccount)> { self.state_at(id) - .and_then(move |state| state.prove_account(key1, from_level).ok()) - .unwrap_or_else(Vec::new) - } - - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes { - self.state_at(id) - .and_then(move |state| state.code_by_address_hash(account_key).ok()) - .and_then(|x| x) - .unwrap_or_else(Vec::new) + .and_then(move |state| state.prove_account(key1).ok()) } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 5d436f4c5..79783175d 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -38,6 +38,7 @@ use error::{ImportResult, Error as EthcoreError}; use evm::{Factory as EvmFactory, VMType, Schedule}; use miner::{Miner, MinerService, TransactionImportResult}; use 
spec::Spec; +use types::basic_account::BasicAccount; use types::mode::Mode; use types::pruning_info::PruningInfo; @@ -754,16 +755,12 @@ impl BlockChainClient for TestBlockChainClient { } impl ProvingBlockChainClient for TestBlockChainClient { - fn prove_storage(&self, _: H256, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() + fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { + None } - fn prove_account(&self, _: H256, _: u32, _: BlockId) -> Vec { - Vec::new() - } - - fn code_by_hash(&self, _: H256, _: BlockId) -> Bytes { - Vec::new() + fn prove_account(&self, _: H256, _: BlockId) -> Option<(Vec, BasicAccount)> { + None } fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option> { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 4af20de0f..145398ef6 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -34,6 +34,7 @@ use env_info::LastHashes; use block_import_error::BlockImportError; use ipc::IpcConfig; use types::ids::*; +use types::basic_account::BasicAccount; use types::trace_filter::Filter as TraceFilter; use types::call_analytics::CallAnalytics; use types::blockchain_info::BlockChainInfo; @@ -309,19 +310,12 @@ pub trait ProvingBlockChainClient: BlockChainClient { /// /// Both provided keys assume a secure trie. /// Returns a vector of raw trie nodes (in order from the root) proving the storage query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec; + fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec, H256)>; /// Prove account existence at a specific block id. /// The key is the keccak hash of the account's address. /// Returns a vector of raw trie nodes (in order from the root) proving the query. - /// Nodes after `from_level` may be omitted. - /// An empty vector indicates unservable query. - fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec; - - /// Get code by address hash. - fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes; + fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec, BasicAccount)>; /// Prove execution of a transaction at the given block. fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option>; diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index ebdf36d89..51f7e3b98 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -438,18 +438,19 @@ impl Account { /// trie. /// `storage_key` is the hash of the desired storage key, meaning /// this will only work correctly under a secure trie. - /// Returns a merkle proof of the storage trie node with all nodes before `from_level` - /// omitted. 
- pub fn prove_storage(&self, db: &HashDB, storage_key: H256, from_level: u32) -> Result, Box> { + pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> Result<(Vec, H256), Box> { use util::trie::{Trie, TrieDB}; use util::trie::recorder::Recorder; - let mut recorder = Recorder::with_depth(from_level); + let mut recorder = Recorder::new(); let trie = TrieDB::new(db, &self.storage_root)?; - let _ = trie.get_with(&storage_key, &mut recorder)?; + let item: U256 = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&storage_key, query)?.unwrap_or_else(U256::zero) + }; - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), item.into())) } } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 3c5a3bc09..745fb2980 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -31,6 +31,7 @@ use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; +use types::basic_account::BasicAccount; use types::executed::{Executed, ExecutionError}; use types::state_diff::StateDiff; use transaction::SignedTransaction; @@ -857,47 +858,43 @@ impl State { // State proof implementations; useful for light client protocols. impl State { /// Prove an account's existence or nonexistence in the state trie. - /// Returns a merkle proof of the account's trie node with all nodes before `from_level` - /// omitted or an encountered trie error. + /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. + /// If the account doesn't exist in the trie, prove that and return defaults. /// Requires a secure trie to be used for accurate results. /// `account_key` == sha3(address) - pub fn prove_account(&self, account_key: H256, from_level: u32) -> trie::Result> { - let mut recorder = Recorder::with_depth(from_level); + pub fn prove_account(&self, account_key: H256) -> trie::Result<(Vec, BasicAccount)> { + let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - trie.get_with(&account_key, &mut recorder)?; + let maybe_account: Option = { + let query = (&mut recorder, ::rlp::decode); + trie.get_with(&account_key, query)? + }; + let account = maybe_account.unwrap_or_else(|| BasicAccount { + balance: 0.into(), + nonce: self.account_start_nonce, + code_hash: SHA3_EMPTY, + storage_root: ::util::sha3::SHA3_NULL_RLP, + }); - Ok(recorder.drain().into_iter().map(|r| r.data).collect()) + Ok((recorder.drain().into_iter().map(|r| r.data).collect(), account)) } /// Prove an account's storage key's existence or nonexistence in the state. - /// Returns a merkle proof of the account's storage trie with all nodes before - /// `from_level` omitted. Requires a secure trie to be used for correctness. + /// Returns a merkle proof of the account's storage trie. + /// Requires a secure trie to be used for correctness. /// `account_key` == sha3(address) /// `storage_key` == sha3(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256, from_level: u32) -> trie::Result> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> trie::Result<(Vec, H256)> { // TODO: probably could look into cache somehow but it's keyed by // address, not sha3(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let acc = match trie.get_with(&account_key, Account::from_rlp)? 
{ Some(acc) => acc, - None => return Ok(Vec::new()), + None => return Ok((Vec::new(), H256::new())), }; let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - acc.prove_storage(account_db.as_hashdb(), storage_key, from_level) - } - - /// Get code by address hash. - /// Only works when backed by a secure trie. - pub fn code_by_address_hash(&self, account_key: H256) -> trie::Result> { - let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - let mut acc = match trie.get_with(&account_key, Account::from_rlp)? { - Some(acc) => acc, - None => return Ok(None), - }; - - let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account_key); - Ok(acc.cache_code(account_db.as_hashdb()).map(|c| (&*c).clone())) + acc.prove_storage(account_db.as_hashdb(), storage_key) } } From 87f3d53607266e6f055c97c0d9882d03e72e08d3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 6 Mar 2017 17:36:56 +0100 Subject: [PATCH 17/89] new requests in provider. --- ethcore/light/src/client/mod.rs | 47 ------- ethcore/light/src/provider.rs | 196 ++++++++++++----------------- ethcore/light/src/types/request.rs | 115 ++++++++++++++++- 3 files changed, 191 insertions(+), 167 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 2872e0eec..34f7ed990 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -315,50 +315,3 @@ impl LightChainClient for Client { Client::cht_root(self, i) } } - -// dummy implementation, should be removed when a `TestClient` is added. -impl ::provider::Provider for Client { - fn chain_info(&self) -> BlockChainInfo { - Client::chain_info(self) - } - - fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { - None - } - - fn earliest_state(&self) -> Option { - None - } - - fn block_header(&self, id: BlockId) -> Option { - Client::block_header(self, id) - } - - fn block_body(&self, _id: BlockId) -> Option { - None - } - - fn block_receipts(&self, _hash: &H256) -> Option { - None - } - - fn state_proof(&self, _req: ::request::StateProof) -> Vec { - Vec::new() - } - - fn contract_code(&self, _req: ::request::ContractCode) -> Bytes { - Vec::new() - } - - fn header_proof(&self, _req: ::request::HeaderProof) -> Option<(encoded::Header, Vec)> { - None - } - - fn transaction_proof(&self, _req: ::request::TransactionProof) -> Option> { - None - } - - fn ready_transactions(&self) -> Vec<::ethcore::transaction::PendingTransaction> { - Vec::new() - } -} diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 3f55a6b99..4e43296ab 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -59,10 +59,10 @@ pub trait Provider: Send + Sync { /// /// The returned vector may have any length in the range [0, `max`], but the /// results within must adhere to the `skip` and `reverse` parameters. 
- fn block_headers(&self, req: request::Headers) -> Vec { + fn block_headers(&self, req: request::CompleteHeadersRequest) -> Option { use request::HashOrNumber; - if req.max == 0 { return Vec::new() } + if req.max == 0 { return None } let best_num = self.chain_info().best_block_number; let start_num = match req.start { @@ -70,7 +70,7 @@ pub trait Provider: Send + Sync { HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { None => { trace!(target: "les_provider", "Unknown block hash {} requested", hash); - return Vec::new(); + return None; } Some(header) => { let num = header.number(); @@ -79,7 +79,9 @@ pub trait Provider: Send + Sync { if req.max == 1 || canon_hash != Some(hash) { // Non-canonical header or single header requested. - return vec![header]; + return Some(::request::HeadersResponse { + headers: vec![header], + }) } num @@ -87,109 +89,39 @@ pub trait Provider: Send + Sync { } }; - (0u64..req.max as u64) + let headers = (0u64..req.max as u64) .map(|x: u64| x.saturating_mul(req.skip + 1)) .take_while(|x| if req.reverse { x < &start_num } else { best_num.saturating_sub(start_num) >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) .map(|x| self.block_header(BlockId::Number(x))) .take_while(|x| x.is_some()) .flat_map(|x| x) - .collect() + .collect(); + + Some(::request::HeadersResponse { headers: headers }) } /// Get a block header by id. fn block_header(&self, id: BlockId) -> Option; - /// Provide as many as possible of the requested blocks (minus the headers) encoded - /// in RLP format. - fn block_bodies(&self, req: request::Bodies) -> Vec> { - req.block_hashes.into_iter() - .map(|hash| self.block_body(BlockId::Hash(hash))) - .collect() - } + /// Fulfill a block body request. + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; - /// Get a block body by id. - fn block_body(&self, id: BlockId) -> Option; + /// Fulfill a request for block receipts. + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; - /// Provide the receipts as many as possible of the requested blocks. - /// Returns a vector of RLP-encoded lists of receipts. - fn receipts(&self, req: request::Receipts) -> Vec { - req.block_hashes.into_iter() - .map(|hash| self.block_receipts(&hash)) - .map(|receipts| receipts.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec())) - .collect() - } + /// Get an account proof. + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option; - /// Get a block's receipts as an RLP-encoded list by block hash. - fn block_receipts(&self, hash: &H256) -> Option; + /// Get a storage proof. + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option; - /// Provide a set of merkle proofs, as requested. Each request is a - /// block hash and request parameters. - /// - /// Returns a vector of RLP-encoded lists satisfying the requests. - fn proofs(&self, req: request::StateProofs) -> Vec { - use rlp::{RlpStream, Stream}; - - let mut results = Vec::with_capacity(req.requests.len()); - - for request in req.requests { - let proof = self.state_proof(request); - - let mut stream = RlpStream::new_list(proof.len()); - for node in proof { - stream.append_raw(&node, 1); - } - - results.push(stream.out()); - } - - results - } - - /// Get a state proof from a request. Each proof should be a vector - /// of rlp-encoded trie nodes, in ascending order by distance from the root. 
- fn state_proof(&self, req: request::StateProof) -> Vec; - - /// Provide contract code for the specified (block_hash, account_hash) pairs. - /// Each item in the resulting vector is either the raw bytecode or empty. - fn contract_codes(&self, req: request::ContractCodes) -> Vec { - req.code_requests.into_iter() - .map(|req| self.contract_code(req)) - .collect() - } - - /// Get contract code by request. Either the raw bytecode or empty. - fn contract_code(&self, req: request::ContractCode) -> Bytes; - - /// Provide header proofs from the Canonical Hash Tries as well as the headers - /// they correspond to -- each element in the returned vector is a 2-tuple. - /// The first element is a block header and the second a merkle proof of - /// the header in a requested CHT. - fn header_proofs(&self, req: request::HeaderProofs) -> Vec { - use rlp::{self, RlpStream, Stream}; - - req.requests.into_iter() - .map(|req| self.header_proof(req)) - .map(|maybe_proof| match maybe_proof { - None => rlp::EMPTY_LIST_RLP.to_vec(), - Some((header, proof)) => { - let mut stream = RlpStream::new_list(2); - stream.append_raw(&header.into_inner(), 1).begin_list(proof.len()); - - for node in proof { - stream.append_raw(&node, 1); - } - - stream.out() - } - }) - .collect() - } + /// Provide contract code for the specified (block_hash, code_hash) pair. + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option; /// Provide a header proof from a given Canonical Hash Trie as well as the - /// corresponding header. The first element is the block header and the - /// second is a merkle proof of the CHT. - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)>; + /// corresponding header. + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; /// Provide pending transactions. 
fn ready_transactions(&self) -> Vec; @@ -217,32 +149,52 @@ impl Provider for T { BlockChainClient::block_header(self, id) } - fn block_body(&self, id: BlockId) -> Option { + fn block_body(&self, req: request::CompleteBodyRequest) -> Option; BlockChainClient::block_body(self, id) + .map(|body| ::request::BodyResponse { body: body }) } - fn block_receipts(&self, hash: &H256) -> Option { + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; BlockChainClient::block_receipts(self, hash) + .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(key2) => self.prove_storage(req.key1, key2, req.from_level, BlockId::Hash(req.block)), - None => self.prove_account(req.key1, req.from_level, BlockId::Hash(req.block)), - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + self.prove_account(req.address_hash, BlockId::Hash(req.block_hash)).map(|(proof, acc)| { + ::request::AccountResponse { + proof: proof, + nonce: acc.nonce, + balance: acc.balance, + code_hash: acc.code_hash, + storage_root: acc.storage_root, + } + })) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - self.code_by_hash(req.account_key, BlockId::Hash(req.block_hash)) + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + self.prove_account(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { + ::request::StorageResponse { + proof: proof, + value: item, + } + })) } - fn header_proof(&self, req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { - if Some(req.cht_number) != cht::block_to_cht_number(req.block_number) { - debug!(target: "les_provider", "Requested CHT number mismatch with block number."); - return None; - } + fn contract_code(&self, req: request::ContractCode) -> Option { + self.state_data(&req.code_hash) + .map(|code| ::request::CodeResponse { code: code }) + } - let mut needed_hdr = None; + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; + let cht_number = match cht::block_to_cht_number(req.num) { + Some(cht_num) => cht_num, + None => { + debug!(target: "les_provider", "Requested CHT proof with invalid block number"); + return None; + } + }; + + let mut needed = None; // build the CHT, caching the requested header as we pass through it. let cht = { @@ -258,8 +210,8 @@ impl Provider for T { total_difficulty: td, }; - if hdr.number() == req.block_number { - needed_hdr = Some(hdr); + if hdr.number() == req.num { + needed = Some((hdr, td)); } Some(info) @@ -268,17 +220,21 @@ impl Provider for T { } }; - match cht::build(req.cht_number, block_info) { + match cht::build(cht_number, block_info) { Some(cht) => cht, None => return None, // incomplete CHT. } }; - let needed_hdr = needed_hdr.expect("`needed_hdr` always set in loop, number checked before; qed"); + let (needed_hdr, needed_td) = needed.expect("`needed` always set in loop, number checked before; qed"); // prove our result. 
- match cht.prove(req.block_number, req.from_level) { - Ok(Some(proof)) => Some((needed_hdr, proof)), + match cht.prove(req.num, 0) { + Ok(Some(proof)) => Some(::request::HeaderProofResponse { + proof: proof, + hash: needed_hdr.hash(), + td: needed_td, + }), Ok(None) => None, Err(e) => { debug!(target: "les_provider", "Error looking up number in freshly-created CHT: {}", e); @@ -347,23 +303,27 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } - fn block_body(&self, _id: BlockId) -> Option { + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { None } - fn block_receipts(&self, _hash: &H256) -> Option { + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { None } - fn state_proof(&self, _req: request::StateProof) -> Vec { - Vec::new() + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + None } - fn contract_code(&self, _req: request::ContractCode) -> Bytes { - Vec::new() + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + None } - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + None + } + + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { None } diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 259f3def7..2b23d0380 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -113,6 +113,15 @@ pub enum Output { Number(u64), } +impl Output { + fn kind(&self) -> OutputKind { + match *self { + Output::Hash(_) => OutputKind::Hash, + Output::Number(_) => OutputKind::Number, + } + } +} + /// Response output kinds which can be used as back-references. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OutputKind { @@ -145,6 +154,7 @@ impl From for HashOrNumber { } /// All request types, as they're sent over the network. +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { /// A request for block headers. Headers(IncompleteHeadersRequest), @@ -164,6 +174,27 @@ pub enum Request { // Transaction proof. } +/// All request types, as they're sent over the network. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CompleteRequest { + /// A request for block headers. + Headers(CompleteHeadersRequest), + /// A request for a header proof (from a CHT) + HeaderProof(CompleteHeaderProofRequest), + // TransactionIndex, + /// A request for a block's receipts. + Receipts(CompleteReceiptsRequest), + /// A request for a block body. + Body(CompleteBodyRequest), + /// A request for a merkle proof of an account. + Account(CompleteAccountRequest), + /// A request for a merkle proof of contract storage. + Storage(CompleteStorageRequest), + /// A request for contract code. + Code(CompleteCodeRequest), + // Transaction proof. 
+} + impl Request { fn kind(&self) -> RequestKind { match *self { @@ -210,10 +241,54 @@ impl Encodable for Request { } } +impl IncompleteRequest for Request { + type Complete = CompleteRequest; + + fn check_outputs(&self, f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + match *self { + Request::Headers(ref req) => req.check_outputs(f), + Request::HeaderProof(ref req) => req.check_outputs(f), + Request::Receipts(ref req) => req.check_outputs(f), + Request::Body(ref req) => req.check_outputs(f), + Request::Account(ref req) => req.check_outputs(f), + Request::Storage(ref req) => req.check_outputs(f), + Request::Code(ref req) => req.check_outputs(f), + } + } + + fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind) { + match *self { + Request::Headers(ref req) => req.note_outputs(f), + Request::HeaderProof(ref req) => req.note_outputs(f), + Request::Receipts(ref req) => req.note_outputs(f), + Request::Body(ref req) => req.note_outputs(f), + Request::Account(ref req) => req.note_outputs(f), + Request::Storage(ref req) => req.note_outputs(f), + Request::Code(ref req) => req.note_outputs(f), + } + } + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + match self { + Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)), + Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)), + Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)), + Request::Body(req) => CompleteRequest::Body(req.fill(oracle)), + Request::Account(req) => CompleteRequest::Account(req.fill(oracle)), + Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)), + Request::Code(req) => CompleteRequest::Code(req.fill(oracle)), + } + } +} /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestKind { /// A request for headers. Headers = 0, @@ -252,6 +327,42 @@ impl Encodable for RequestKind { } } +/// All response types. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Response { + /// A response for block headers. + Headers(HeadersResponse), + /// A response for a header proof (from a CHT) + HeaderProof(HeaderProofResponse), + // TransactionIndex, + /// A response for a block's receipts. + Receipts(ReceiptsResponse), + /// A response for a block body. + Body(BodyResponse), + /// A response for a merkle proof of an account. + Account(AccountResponse), + /// A response for a merkle proof of contract storage. + Storage(StorageResponse), + /// A response for contract code. + Code(CodeResponse), + // Transaction proof. +} + +impl Response { + /// Fill reusable outputs by writing them into the function. + pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + match *self { + Response::Headers(res) => res.fill_outputs(f) + Response::HeaderProof(res) => res.fill_outputs(f) + Response::Receipts(res) => res.fill_outputs(f) + Response::Body(res) => res.fill_outputs(f) + Response::Account(res) => res.fill_outputs(f) + Response::Storage(res) => res.fill_outputs(f) + Response::Code(res) => res.fill_outputs(f) + } + } +} + /// A potentially incomplete request. pub trait IncompleteRequest: Sized { type Complete; @@ -369,7 +480,8 @@ pub mod header { /// The output of a request for headers. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Response { - header: Vec, + /// The headers requested. 
+ pub headers: Vec, } impl Response { @@ -523,7 +635,6 @@ pub mod block_receipts { hash: hash, }) } - } /// A complete block receipts request. From b396b56e34e641e6e957ce573f5fdbdce9764079 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 17:18:26 +0100 Subject: [PATCH 18/89] encode and decode responses --- ethcore/light/src/provider.rs | 22 +- ethcore/light/src/types/request.rs | 491 +++++++++++++++++++++++++---- 2 files changed, 439 insertions(+), 74 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 4e43296ab..4a9a96999 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -30,16 +30,9 @@ use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; use transaction_queue::TransactionQueue; - use request; -/// Defines the operations that a provider for `LES` must fulfill. -/// -/// These are defined at [1], but may be subject to change. -/// Requests which can't be fulfilled should return either an empty RLP list -/// or empty vector where appropriate. -/// -/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) +/// Defines the operations that a provider for the light subprotocol must fulfill. #[cfg_attr(feature = "ipc", ipc(client_ident="LightProviderClient"))] pub trait Provider: Send + Sync { /// Provide current blockchain info. @@ -149,12 +142,12 @@ impl Provider for T { BlockChainClient::block_header(self, id) } - fn block_body(&self, req: request::CompleteBodyRequest) -> Option; + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { BlockChainClient::block_body(self, id) .map(|body| ::request::BodyResponse { body: body }) } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option; + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { BlockChainClient::block_receipts(self, hash) .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } @@ -168,7 +161,7 @@ impl Provider for T { code_hash: acc.code_hash, storage_root: acc.storage_root, } - })) + }) } fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { @@ -177,7 +170,7 @@ impl Provider for T { proof: proof, value: item, } - })) + }) } fn contract_code(&self, req: request::ContractCode) -> Option { @@ -185,7 +178,7 @@ impl Provider for T { .map(|code| ::request::CodeResponse { code: code }) } - fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option; + fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { let cht_number = match cht::block_to_cht_number(req.num) { Some(cht_num) => cht_num, None => { @@ -243,7 +236,7 @@ impl Provider for T { } } - fn transaction_proof(&self, req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { use ethcore::transaction::Transaction; let id = BlockId::Hash(req.at); @@ -261,6 +254,7 @@ impl Provider for T { }.fake_sign(req.from); self.prove_transaction(transaction, id) + .map(|proof| ::request::ExecutionResponse { items: proof }) } fn ready_transactions(&self) -> Vec { diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 2b23d0380..7ad16ea4d 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -38,9 +38,9 @@ pub use self::block_body::{ Incomplete as IncompleteBodyRequest, Response as BodyResponse }; -pub use self::receipts::{ +pub use self::block_receipts::{ Complete as CompleteReceiptsRequest, - 
Incomplete as IncompleteReceiptsRequest + Incomplete as IncompleteReceiptsRequest, Response as ReceiptsResponse }; pub use self::account::{ @@ -58,6 +58,11 @@ pub use self::contract_code::{ Incomplete as IncompleteCodeRequest, Response as CodeResponse, }; +pub use self::execution::{ + Complete as CompleteExecutionRequest, + Incomplete as IncompleteExecutionRequest, + Response as ExecutionResponse, +}; /// Error indicating a reference to a non-existent or wrongly-typed output. pub struct NoSuchOutput; @@ -87,7 +92,7 @@ impl Decodable for Field { 1 => Ok({ let inner_rlp = rlp.at(1)?; Field::BackReference(inner_rlp.val_at(0)?, inner_rlp.val_at(1)?) - }) + }), _ => Err(DecoderError::Custom("Unknown discriminant for PIP field.")), } } @@ -171,7 +176,8 @@ pub enum Request { Storage(IncompleteStorageRequest), /// A request for contract code. Code(IncompleteCodeRequest), - // Transaction proof. + /// A request for proof of execution, + Execution(IncompleteExecutionRequest), } /// All request types, as they're sent over the network. @@ -192,19 +198,21 @@ pub enum CompleteRequest { Storage(CompleteStorageRequest), /// A request for contract code. Code(CompleteCodeRequest), - // Transaction proof. + /// A request for proof of execution, + Execution(CompleteExecutionRequest), } impl Request { - fn kind(&self) -> RequestKind { + fn kind(&self) -> Kind { match *self { - Request::Headers(_) => RequestKind::Headers, - Request::HeaderProof(_) => RequestKind::HeaderProof, - Request::Receipts(_) => RequestKind::Receipts, - Request::Body(_) => RequestKind::Body, - Request::Account(_) => RequestKind::Account, - Request::Storage(_) => RequestKind::Storage, - Request::Code(_) => RequestKind::Code, + Request::Headers(_) => Kind::Headers, + Request::HeaderProof(_) => Kind::HeaderProof, + Request::Receipts(_) => Kind::Receipts, + Request::Body(_) => Kind::Body, + Request::Account(_) => Kind::Account, + Request::Storage(_) => Kind::Storage, + Request::Code(_) => Kind::Code, + Request::Execution(_) => Kind::Execution, } } } @@ -213,14 +221,15 @@ impl Decodable for Request { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); - match rlp.val_at::(0)? { - RequestKind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), - RequestKind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), - RequestKind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), - RequestKind::Body => Ok(Request::Body(rlp.val_at(1)?)), - RequestKind::Account => Ok(Request::Account(rlp.val_at(1)?)), - RequestKind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), - RequestKind::Code => Ok(Request::Code(rlp.val_at(1)?)), + match rlp.val_at::(0)? 
{ + Kind::Headers => Ok(Request::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Request::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Request::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Request::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Request::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Request::Code(rlp.val_at(1)?)), + Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)), } } } @@ -237,6 +246,7 @@ impl Encodable for Request { Request::Account(ref req) => s.append(req), Request::Storage(ref req) => s.append(req), Request::Code(ref req) => s.append(req), + Request::Execution(ref req) => s.append(req), }; } } @@ -255,6 +265,7 @@ impl IncompleteRequest for Request { Request::Account(ref req) => req.check_outputs(f), Request::Storage(ref req) => req.check_outputs(f), Request::Code(ref req) => req.check_outputs(f), + Request::Execution(ref req) => req.check_outputs(f), } } @@ -267,29 +278,31 @@ impl IncompleteRequest for Request { Request::Account(ref req) => req.note_outputs(f), Request::Storage(ref req) => req.note_outputs(f), Request::Code(ref req) => req.note_outputs(f), + Request::Execution(ref req) => req.note_outputs(f), } } fn fill(self, oracle: F) -> Result where F: Fn(usize, usize) -> Result { - match self { - Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)), - Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)), - Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)), - Request::Body(req) => CompleteRequest::Body(req.fill(oracle)), - Request::Account(req) => CompleteRequest::Account(req.fill(oracle)), - Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)), - Request::Code(req) => CompleteRequest::Code(req.fill(oracle)), - } + Ok(match self { + Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)?), + Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)?), + Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)?), + Request::Body(req) => CompleteRequest::Body(req.fill(oracle)?), + Request::Account(req) => CompleteRequest::Account(req.fill(oracle)?), + Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)?), + Request::Code(req) => CompleteRequest::Code(req.fill(oracle)?), + Request::Execution(req) => CompleteRequest::Execution(req.fill(oracle)?), + }) } } /// Kinds of requests. /// Doubles as the "ID" field of the request. #[repr(u8)] -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum RequestKind { +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Kind { /// A request for headers. Headers = 0, HeaderProof = 1, @@ -299,29 +312,29 @@ pub enum RequestKind { Account = 5, Storage = 6, Code = 7, - // TransactionProof = 8, + Execution = 8, } -impl Decodable for RequestKind { +impl Decodable for Kind { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); match rlp.as_val::()? 
{ - 0 => Ok(RequestKind::Headers), - 1 => Ok(RequestKind::HeaderProof), - // 2 => Ok(RequestKind::TransactionIndex, - 3 => Ok(RequestKind::Receipts), - 4 => Ok(RequestKind::Body), - 5 => Ok(RequestKind::Account), - 6 => Ok(RequestKind::Storage), - 7 => Ok(RequestKind::Code), - // 8 => Ok(RequestKind::TransactionProof), + 0 => Ok(Kind::Headers), + 1 => Ok(Kind::HeaderProof), + // 2 => Ok(Kind::TransactionIndex, + 3 => Ok(Kind::Receipts), + 4 => Ok(Kind::Body), + 5 => Ok(Kind::Account), + 6 => Ok(Kind::Storage), + 7 => Ok(Kind::Code), + 8 => Ok(Kind::Execution), _ => Err(DecoderError::Custom("Unknown PIP request ID.")), } } } -impl Encodable for RequestKind { +impl Encodable for Kind { fn rlp_append(&self, s: &mut RlpStream) { s.append(self as &u8); } @@ -345,22 +358,71 @@ pub enum Response { Storage(StorageResponse), /// A response for contract code. Code(CodeResponse), - // Transaction proof. + /// A response for proof of execution, + Execution(ExecutionResponse), } impl Response { /// Fill reusable outputs by writing them into the function. pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { match *self { - Response::Headers(res) => res.fill_outputs(f) - Response::HeaderProof(res) => res.fill_outputs(f) - Response::Receipts(res) => res.fill_outputs(f) - Response::Body(res) => res.fill_outputs(f) - Response::Account(res) => res.fill_outputs(f) - Response::Storage(res) => res.fill_outputs(f) - Response::Code(res) => res.fill_outputs(f) + Response::Headers(res) => res.fill_outputs(f), + Response::HeaderProof(res) => res.fill_outputs(f), + Response::Receipts(res) => res.fill_outputs(f), + Response::Body(res) => res.fill_outputs(f), + Response::Account(res) => res.fill_outputs(f), + Response::Storage(res) => res.fill_outputs(f), + Response::Code(res) => res.fill_outputs(f), + Response::Execution(res) => res.fill_outputs(f), } } + + fn kind(&self) -> Kind { + match *self { + Response::Headers(_) => Kind::Headers, + Response::HeaderProof(_) => Kind::HeaderProof, + Response::Receipts(_) => Kind::Receipts, + Response::Body(_) => Kind::Body, + Response::Account(_) => Kind::Account, + Response::Storage(_) => Kind::Storage, + Response::Code(_) => Kind::Code, + Respnse::Execution(_) => Kind::Execution, + } + } +} + +impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + match rlp.val_at::(0)? { + Kind::Headers => Ok(Response::Headers(rlp.val_at(1)?)), + Kind::HeaderProof => Ok(Response::HeaderProof(rlp.val_at(1)?)), + Kind::Receipts => Ok(Response::Receipts(rlp.val_at(1)?)), + Kind::Body => Ok(Response::Body(rlp.val_at(1)?)), + Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), + Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), + Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), + Kind::Execution=> Ok(Response::Execution(rlp.val_at(1)?)), + } + } +} + +impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2).append(&self.kind()); + + match *self { + Response::Headers(ref res) => s.append(res), + Response::HeaderProof(ref res) => s.append(res), + Response::Receipts(ref res) => s.append(res), + Response::Body(ref res) => s.append(res), + Response::Account(ref res) => s.append(res), + Response::Storage(ref res) => s.append(res), + Response::Code(ref res) => s.append(res), + Response::Execution(ref res) => s.append(res), + }; + } } /// A potentially incomplete request. 
@@ -390,7 +452,7 @@ pub trait IncompleteRequest: Sized { pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::U256; /// Potentially incomplete headers request. @@ -488,12 +550,41 @@ pub mod header { /// Fill reusable outputs by writing them into the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) { } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + use ethcore::header::Header as FullHeader; + let rlp = decoder.as_rlp(); + + let mut headers = Vec::new(); + + for item in rlp.at(0)?.iter() { + // check that it's a valid encoding. + // TODO: just return full headers here? + let _: FullHeader = item.as_val()?; + headers.push(encoded::Header::new(item.as_raw().to_owned())); + } + + Ok(Response { + headers: headers, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.headers.len()); + for header in &self.headers { + s.append_raw(header.rlp().as_raw(), 1); + } + } + } } /// Request and response for header proofs. pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete header proof request. @@ -576,12 +667,34 @@ pub mod header_proof { f(1, Output::Hash(self.hash)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + hash: rlp.val_at(1)?, + td: rlp.val_at(2)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(3) + .append(&self.proof) + .append(&self.hash) + .append(&self.td); + } + } } /// Request and response for block receipts pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use ethcore::receipt::Receipt; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete block receipts request. @@ -655,13 +768,29 @@ pub mod block_receipts { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + receipts: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.receipts); + } + } } /// Request and response for a block body pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete block body request. @@ -736,13 +865,38 @@ pub mod block_body { /// Fill reusable outputs by providing them to the function. 
pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + use ethcore::header::Header as FullHeader; + use ethcore::transaction::SignedTransaction; + + let rlp = decoder.as_rlp(); + let body_rlp = rlp.at(0)?; + + // check body validity. + let _: Vec = rlp.val_at(0)?; + let _: Vec = rlp.val_at(1)?; + + Ok(Response { + body: encoded::Body::new(body_rlp.as_raw().to_owned()), + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append_raw(&self.body.rlp().as_raw(), 2); + } + } } /// A request for an account proof. pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an account proof. @@ -852,13 +1006,38 @@ pub mod account { f(1, Output::Hash(self.storage_root)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + balance: rlp.val_at(2)?, + code_hash: rlp.val_at(3)?, + storage_root: rlp.val_at(4)? + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5) + .append(&self.proof) + .append(&self.nonce) + .append(&self.balance) + .append(&self.code_hash) + .append(&self.storage_root) + } + } } /// A request for a storage proof. pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; /// Potentially incomplete request for an storage proof. @@ -979,16 +1158,35 @@ pub mod storage { f(0, Output::Hash(self.value)); } } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + proof: rlp.val_at(0)?, + value: rlp.val_at(1)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(2) + .append(&self.proof) + .append(&self.value); + } + } } /// A request for contract code. pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; - use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; - /// Potentially incomplete _ request. + /// Potentially incomplete contract code request. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Incomplete { /// The block hash to request the state for. @@ -1080,4 +1278,177 @@ pub mod contract_code { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + Ok(Response { + code: rlp.val_at(0)?, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.code); + } + } +} + +/// A request for proof of execution. 
+pub mod execution { + use super::{Field, NoSuchOutput, OutputKind, Output}; + use ethcore::encoded; + use ethcore::transaction::Action; + use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; + use util::{Bytes, Address, U256, H256, DBValue}; + + /// Potentially incomplete execution proof request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Incomplete { + /// The block hash to request the state for. + pub block_hash: Field, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } + + impl Decodable for Incomplete { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + Ok(Incomplete { + block_hash: rlp.val_at(0)?, + address: rlp.val_at(1)?, + action: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + gas_price: rlp.val_at(4)?, + value: rlp.val_at(5)?, + data: rlp.val_at(6)?, + }) + } + } + + impl Encodable for Incomplete { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(7) + .append(&self.block_hash) + .append(&self.from); + + match *self.action { + Action::Create => s.append_empty_data(), + Action::Call(ref addr) => s.append(addr), + }; + + s.append(&self.gas) + .append(&self.gas_price) + .append(&self.value) + .append(&self.data); + } + } + + impl super::IncompleteRequest for Incomplete { + type Complete = Complete; + + fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> + where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> + { + if let Field::BackReference(req, idx) = self.block_hash { + f(req, idx, OutputKind::Hash)?; + } + + Ok(()) + } + + fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} + + fn fill(self, oracle: F) -> Result + where F: Fn(usize, usize) -> Result + { + let block_hash = match self.block_hash { + Field::Scalar(hash) => hash, + Field::BackReference(req, idx) => match oracle(req, idx)? { + Output::Hash(hash) => hash, + _ => return Err(NoSuchOutput)?, + } + }; + + Ok(Complete { + block_hash: block_hash, + from: self.from, + action: self.action, + gas: self.gas, + gas_price: self.gas_price, + value: self.value, + data: self.data, + }) + } + + } + + /// A complete request. + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Complete { + /// The block hash to request the state for. + pub block_hash: H256, + /// The address the transaction should be from. + pub from: Address, + /// The action of the transaction. + pub action: Action, + /// The amount of gas to prove. + pub gas: U256, + /// The gas price. + pub gas_price: U256, + /// The value to transfer. + pub value: U256, + /// Call data. + pub data: Bytes, + } + + /// The output of a request for proof of execution + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Response { + /// All state items (trie nodes, code) necessary to re-prove the transaction. + pub items: Vec, + } + + impl Response { + /// Fill reusable outputs by providing them to the function. 
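		// (A no-op here: an execution proof exposes no outputs that a later
		// request in the chain could back-reference.)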
+ pub fn fill_outputs(&self, _: F) where F: FnMut(usize, Output) {} + } + + impl Decodable for Response { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + let mut items = Vec::new(); + for raw_item in rlp.at(0)?.iter() { + let mut item = DBValue::new(); + item.append_slice(raw_item.data()); + items.push(item); + } + + Ok(Response { + items: items, + }) + } + } + + impl Encodable for Response { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(&self.items.len()); + + for item in &self.items { + s.append(&&**item); + } + } + } } From 04291fe71e37fd8679f8a76ce853cadcbc90f844 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 19:48:07 +0100 Subject: [PATCH 19/89] complete initial request changes --- ethcore/light/Cargo.toml | 2 +- ethcore/light/src/lib.rs | 3 +- ethcore/light/src/net/context.rs | 14 +- ethcore/light/src/net/mod.rs | 805 ++--------------------- ethcore/light/src/net/request_credits.rs | 166 ++--- ethcore/light/src/net/request_set.rs | 17 +- ethcore/light/src/provider.rs | 14 +- ethcore/light/src/request_builder.rs | 116 ++++ ethcore/light/src/types/request.rs | 97 +-- 9 files changed, 273 insertions(+), 961 deletions(-) create mode 100644 ethcore/light/src/request_builder.rs diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 9e10449fb..cab75a36a 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "Parity LES primitives" +description = "Parity Light Client Implementation" homepage = "http://parity.io" license = "GPL-3.0" name = "ethcore-light" diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ada58d8de..b15c85242 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -35,9 +35,10 @@ pub mod client; pub mod cht; pub mod net; -pub mod on_demand; +//pub mod on_demand; pub mod transaction_queue; pub mod cache; +pub mod request_builder; #[cfg(not(feature = "ipc"))] pub mod provider; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index bd0c8a6bb..332d497a1 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -12,7 +12,7 @@ // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// along with Parity. If not, see . //! I/O and event context generalizations. @@ -89,10 +89,6 @@ pub trait BasicContext { // TODO: maybe just put this on a timer in LightProtocol? fn make_announcement(&self, announcement: Announcement); - /// Find the maximum number of requests of a specific type which can be made from - /// supplied peer. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize; - /// Disconnect a peer. 
fn disconnect_peer(&self, peer: PeerId); @@ -131,10 +127,6 @@ impl<'a> BasicContext for TickCtx<'a> { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } @@ -168,10 +160,6 @@ impl<'a> BasicContext for Ctx<'a> { self.proto.make_announcement(self.io, announcement); } - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.proto.max_requests(peer, kind) - } - fn disconnect_peer(&self, peer: PeerId) { self.io.disconnect_peer(peer); } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 181f95e95..1b2433fbe 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! LES Protocol Version 1 implementation. +//! PIP Protocol Version 1 implementation. //! //! This uses a "Provider" to answer requests. -//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) use ethcore::transaction::{Action, UnverifiedTransaction}; use ethcore::receipt::Receipt; @@ -35,7 +34,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{self, HashOrNumber, Request}; +use request::{self, HashOrNumber, Request, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -83,43 +82,24 @@ mod packet { // announcement of new block hashes or capabilities. pub const ANNOUNCE: u8 = 0x01; - // request and response for block headers - pub const GET_BLOCK_HEADERS: u8 = 0x02; - pub const BLOCK_HEADERS: u8 = 0x03; - - // request and response for block bodies - pub const GET_BLOCK_BODIES: u8 = 0x04; - pub const BLOCK_BODIES: u8 = 0x05; - - // request and response for transaction receipts. - pub const GET_RECEIPTS: u8 = 0x06; - pub const RECEIPTS: u8 = 0x07; - - // request and response for merkle proofs. - pub const GET_PROOFS: u8 = 0x08; - pub const PROOFS: u8 = 0x09; - - // request and response for contract code. - pub const GET_CONTRACT_CODES: u8 = 0x0a; - pub const CONTRACT_CODES: u8 = 0x0b; + // request and response. + pub const REQUEST: u8 = 0x02; + pub const RESPONSE: u8 = 0x03; // relay transactions to peers. - pub const SEND_TRANSACTIONS: u8 = 0x0c; - - // request and response for header proofs in a CHT. - pub const GET_HEADER_PROOFS: u8 = 0x0d; - pub const HEADER_PROOFS: u8 = 0x0e; + pub const SEND_TRANSACTIONS: u8 = 0x04; // request and response for transaction proof. - pub const GET_TRANSACTION_PROOF: u8 = 0x0f; - pub const TRANSACTION_PROOF: u8 = 0x10; + // TODO: merge with request/response. + pub const GET_TRANSACTION_PROOF: u8 = 0x05; + pub const TRANSACTION_PROOF: u8 = 0x06; } // timeouts for different kinds of requests. all values are in milliseconds. // TODO: variable timeouts based on request count. mod timeout { pub const HANDSHAKE: i64 = 2500; - pub const HEADERS: i64 = 5000; + pub const HEADERS: i64 = 2500; pub const BODIES: i64 = 5000; pub const RECEIPTS: i64 = 3500; pub const PROOFS: i64 = 4000; @@ -159,17 +139,6 @@ pub struct Peer { } impl Peer { - // check the maximum cost of a request, returning an error if there's - // not enough credits left. - // returns the calculated maximum cost. 
- fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result { - flow_params.recharge(&mut self.local_credits); - - let max_cost = flow_params.compute_cost(kind, max); - self.local_credits.deduct_cost(max_cost)?; - Ok(max_cost) - } - // refund credits for a request. returns new amount of credits. fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { flow_params.refund(&mut self.local_credits, amount); @@ -197,20 +166,8 @@ pub trait Handler: Send + Sync { fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { } /// Called when a peer requests relay of some transactions. fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } - /// Called when a peer responds with block bodies. - fn on_block_bodies(&self, _ctx: &EventContext, _req_id: ReqId, _bodies: &[Bytes]) { } - /// Called when a peer responds with block headers. - fn on_block_headers(&self, _ctx: &EventContext, _req_id: ReqId, _headers: &[Bytes]) { } - /// Called when a peer responds with block receipts. - fn on_receipts(&self, _ctx: &EventContext, _req_id: ReqId, _receipts: &[Vec]) { } - /// Called when a peer responds with state proofs. Each proof should be a series of trie - /// nodes in ascending order by distance from the root. - fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec]) { } - /// Called when a peer responds with contract code. - fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { } - /// Called when a peer responds with header proofs. Each proof should be a block header coupled - /// with a series of trie nodes is ascending order by distance from the root. - fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec)]) { } + /// Called when a peer responds to requests. + fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _relay: &[Response]) { } /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. @@ -307,7 +264,7 @@ pub struct LightProtocol { impl LightProtocol { /// Create a new instance of the protocol manager. pub fn new(provider: Arc, params: Params) -> Self { - debug!(target: "les", "Initializing LES handler"); + debug!(target: "pip", "Initializing light protocol handler"); let genesis_hash = provider.chain_info().genesis_hash; LightProtocol { @@ -339,62 +296,15 @@ impl LightProtocol { ) } - /// Check the maximum amount of requests of a specific type - /// which a peer would be able to serve. Returns zero if the - /// peer is unknown or has no credit parameters. - fn max_requests(&self, peer: PeerId, kind: request::Kind) -> usize { - self.peers.read().get(&peer).and_then(|peer| { - let mut peer = peer.lock(); - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - Some(flow.max_amount(&*c, kind)) - } - None => None, - } - }).unwrap_or(0) - } - /// Make a request to a peer. /// /// Fails on: nonexistent peer, network error, peer not server, /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. + // TODO: pass `Requests`. 
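	// Assumed call shape, for illustration only:
	//     let req_id = proto.request_from(&io, &peer_id, request)?;
	// the returned `ReqId` is what later ties a RESPONSE packet back to this dispatch.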
pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { - let peers = self.peers.read(); - let peer = peers.get(peer_id).ok_or_else(|| Error::UnknownPeer)?; - let mut peer = peer.lock(); - - match peer.remote_flow { - Some((ref mut c, ref flow)) => { - flow.recharge(c); - let max = flow.compute_cost(request.kind(), request.amount()); - c.deduct_cost(max)?; - } - None => return Err(Error::NotServer), - } - - let req_id = self.req_id.fetch_add(1, Ordering::SeqCst); - let packet_data = encode_request(&request, req_id); - - trace!(target: "les", "Dispatching request {} to peer {}", req_id, peer_id); - - let packet_id = match request.kind() { - request::Kind::Headers => packet::GET_BLOCK_HEADERS, - request::Kind::Bodies => packet::GET_BLOCK_BODIES, - request::Kind::Receipts => packet::GET_RECEIPTS, - request::Kind::StateProofs => packet::GET_PROOFS, - request::Kind::Codes => packet::GET_CONTRACT_CODES, - request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS, - request::Kind::TransactionProof => packet::GET_TRANSACTION_PROOF, - }; - - io.send(*peer_id, packet_id, packet_data); - - peer.pending_requests.insert(ReqId(req_id), request, SteadyTime::now()); - - Ok(ReqId(req_id)) + unimplemented!() } /// Make an announcement of new chain head and capabilities to all peers. @@ -427,7 +337,7 @@ impl LightProtocol { None => { // both values will always originate locally -- this means something // has gone really wrong - debug!(target: "les", "couldn't compute reorganization depth between {:?} and {:?}", + debug!(target: "pip", "couldn't compute reorganization depth between {:?} and {:?}", &announcement.head_hash, &peer_info.sent_head); 0 } @@ -474,11 +384,10 @@ impl LightProtocol { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "les", "pre-verifying response from peer {}, kind={:?}", peer, kind); + trace!(target: "pip", "pre-verifying response from peer {}, kind={:?}", peer, kind); - let mut had_req = false; let peers = self.peers.read(); - let maybe_err = match peers.get(peer) { + let res = match peers.get(peer) { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); @@ -486,69 +395,37 @@ impl LightProtocol { match (req_info, flow_info) { (Some(request), Some(flow_info)) => { - had_req = true; - let &mut (ref mut c, ref mut flow) = flow_info; let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); - if request.kind() != kind { - Some(Error::UnsolicitedResponse) - } else { - None - } + Ok(()) } - (None, _) => Some(Error::UnsolicitedResponse), - (_, None) => Some(Error::NotServer), // really should be impossible. + (None, _) => Err(Error::UnsolicitedResponse), + (_, None) => Err(Error::NotServer), // really should be impossible. } } - None => Some(Error::UnknownPeer), // probably only occurs in a race of some kind. + None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind. }; - if had_req { - let id_guard = IdGuard::new(peers, *peer, req_id); - match maybe_err { - Some(err) => Err(err), - None => Ok(id_guard) - } - } else { - Err(maybe_err.expect("every branch without a request leads to error; qed")) - } + res.map(|_| IdGuard::new(peers, *peer, req_id)) } - /// Handle an LES packet using the given io context. + /// Handle a packet using the given io context. /// Packet data is _untrusted_, which means that invalid data won't lead to /// issues. 
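	// Both REQUEST and RESPONSE packets begin with the request id; a RESPONSE additionally
	// carries the peer's updated credit total, which `pre_verify_response` above reads and
	// clamps to the advertised flow limit.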
pub fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) { let rlp = UntrustedRlp::new(data); - trace!(target: "les", "Incoming packet {} from peer {}", packet_id, peer); + trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer); // handle the packet let res = match packet_id { packet::STATUS => self.status(peer, io, rlp), packet::ANNOUNCE => self.announcement(peer, io, rlp), - packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), - packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), - - packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), - packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), - - packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), - packet::RECEIPTS => self.receipts(peer, io, rlp), - - packet::GET_PROOFS => self.get_proofs(peer, io, rlp), - packet::PROOFS => self.proofs(peer, io, rlp), - - packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), - packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), - - packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), - packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), - - packet::GET_TRANSACTION_PROOF => self.get_transaction_proof(peer, io, rlp), - packet::TRANSACTION_PROOF => self.transaction_proof(peer, io, rlp), + packet::REQUEST => self.request(peer, io, rlp), + packet::RESPONSE => self.response(peer, io, rlp), packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), @@ -577,7 +454,7 @@ impl LightProtocol { .collect(); for slowpoke in slowpokes { - debug!(target: "les", "Peer {} handshake timed out", slowpoke); + debug!(target: "pip", "Peer {} handshake timed out", slowpoke); pending.remove(&slowpoke); io.disconnect_peer(slowpoke); } @@ -587,7 +464,7 @@ impl LightProtocol { { for (peer_id, peer) in self.peers.read().iter() { if peer.lock().pending_requests.check_timeout(now) { - debug!(target: "les", "Peer {} request timeout", peer_id); + debug!(target: "pip", "Peer {} request timeout", peer_id); io.disconnect_peer(*peer_id); } } @@ -631,7 +508,7 @@ impl LightProtocol { /// called when a peer disconnects. pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) { - trace!(target: "les", "Peer {} disconnecting", peer); + trace!(target: "pip", "Peer {} disconnecting", peer); self.pending_peers.write().remove(&peer); let unfulfilled = match self.peers.write().remove(&peer) { @@ -686,7 +563,7 @@ impl LightProtocol { let (status, capabilities, flow_params) = status::parse_handshake(data)?; - trace!(target: "les", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); + trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { return Err(Error::WrongNetwork); @@ -723,7 +600,7 @@ impl LightProtocol { // Handle an announcement. fn announcement(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { if !self.peers.read().contains_key(peer) { - debug!(target: "les", "Ignoring announcement from unknown peer"); + debug!(target: "pip", "Ignoring announcement from unknown peer"); return Ok(()) } @@ -765,447 +642,19 @@ impl LightProtocol { Ok(()) } - // Handle a request for block headers. 
- fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_HEADERS: usize = 512; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - let data = data.at(1)?; - - let start_block = { - if data.at(0)?.size() == 32 { - HashOrNumber::Hash(data.val_at(0)?) - } else { - HashOrNumber::Number(data.val_at(0)?) - } - }; - - let req = request::Headers { - start: start_block, - max: ::std::cmp::min(MAX_HEADERS, data.val_at(1)?), - skip: data.val_at(2)?, - reverse: data.val_at(3)?, - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Headers, req.max)?; - - let response = self.provider.block_headers(req); - let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - io.respond(packet::BLOCK_HEADERS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for header in response { - stream.append_raw(&header.into_inner(), 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block headers. - fn block_headers(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Headers, &raw)?; - let raw_headers: Vec<_> = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_headers(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_headers); - } - - Ok(()) - } - - // Handle a request for block bodies. - fn get_block_bodies(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_BODIES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Bodies { - block_hashes: data.at(1)?.iter() - .take(MAX_BODIES) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Bodies, req.block_hashes.len())?; - - let response = self.provider.block_bodies(req); - let response_len = response.iter().filter(|x| x.is_some()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::BLOCK_BODIES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for body in response { - match body { - Some(body) => stream.append_raw(&body.into_inner(), 1), - None => stream.append_empty_data(), - }; - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for block bodies. 
- fn block_bodies(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Bodies, &raw)?; - let raw_bodies: Vec = raw.at(2)?.iter().map(|x| x.as_raw().to_owned()).collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_block_bodies(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_bodies); - } - - Ok(()) - } - - // Handle a request for receipts. - fn get_receipts(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_RECEIPTS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = request::Receipts { - block_hashes: data.at(1)?.iter() - .take(MAX_RECEIPTS) - .map(|x| x.as_val()) - .collect::>()? - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Receipts, req.block_hashes.len())?; - - let response = self.provider.receipts(req); - let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::RECEIPTS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for receipts in response { - stream.append_raw(&receipts, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for receipts. - fn receipts(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Receipts, &raw)?; - let raw_receipts: Vec> = raw.at(2)? - .iter() - .map(|x| x.as_val()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_receipts(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_receipts); - } - - Ok(()) - } - - // Handle a request for proofs. - fn get_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 128; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::StateProof { - block: x.val_at(0)?, - key1: x.val_at(1)?, - key2: if x.at(2)?.is_empty() { None } else { Some(x.val_at(2)?) }, - from_level: x.val_at(3)?, - }) - }).collect(); - - request::StateProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::StateProofs, req.requests.len())?; - - let response = self.provider.proofs(req); - let response_len = response.iter().filter(|x| &x[..] 
!= &::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for proofs. - fn proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::StateProofs, &raw)?; - - let raw_proofs: Vec> = raw.at(2)?.iter() - .map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect()) - .collect(); - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_state_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Handle a request for contract code. - fn get_contract_code(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_CODES: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_CODES).map(|x| { - Ok(request::ContractCode { - block_hash: x.val_at(0)?, - account_key: x.val_at(1)?, - }) - }).collect(); - - request::ContractCodes { - code_requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::Codes, req.code_requests.len())?; - - let response = self.provider.contract_codes(req); - let response_len = response.iter().filter(|x| !x.is_empty()).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::CONTRACT_CODES, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for code in response { - stream.append(&code); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for contract code. 
- fn contract_code(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::Codes, &raw)?; - - let raw_code: Vec = raw.at(2)?.iter() - .map(|x| x.as_val()) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_code(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_code); - } - - Ok(()) - } - - // Handle a request for header proofs - fn get_header_proofs(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> { - const MAX_PROOFS: usize = 256; - - let peers = self.peers.read(); - let peer = match peers.get(peer) { - Some(peer) => peer, - None => { - debug!(target: "les", "Ignoring request from unknown peer"); - return Ok(()) - } - }; - let mut peer = peer.lock(); - - let req_id: u64 = data.val_at(0)?; - - let req = { - let requests: Result, Error> = data.at(1)?.iter().take(MAX_PROOFS).map(|x| { - Ok(request::HeaderProof { - cht_number: x.val_at(0)?, - block_number: x.val_at(1)?, - from_level: x.val_at(2)?, - }) - }).collect(); - - request::HeaderProofs { - requests: requests?, - } - }; - - let max_cost = peer.deduct_max(&self.flow_params, request::Kind::HeaderProofs, req.requests.len())?; - - let response = self.provider.header_proofs(req); - let response_len = response.iter().filter(|x| &x[..] != ::rlp::EMPTY_LIST_RLP).count(); - let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len); - assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost."); - - let cur_credits = peer.refund(&self.flow_params, max_cost - actual_cost); - - io.respond(packet::HEADER_PROOFS, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for proof in response { - stream.append_raw(&proof, 1); - } - - stream.out() - }); - - Ok(()) - } - - // Receive a response for header proofs - fn header_proofs(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - fn decode_res(raw: UntrustedRlp) -> Result<(Bytes, Vec), ::rlp::DecoderError> { - Ok(( - raw.val_at(0)?, - raw.at(1)?.iter().map(|x| x.as_raw().to_owned()).collect(), - )) - } - - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proofs: Vec<_> = raw.at(2)?.iter() - .map(decode_res) - .collect::>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_header_proofs(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proofs); - } - - Ok(()) - } - - // Receive a request for proof-of-execution. - fn get_transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - // refuse to execute more than this amount of gas at once. - // this is appx. the point at which the proof of execution would no longer fit in - // a single Devp2p packet. + fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + // the maximum amount of requests we'll fill in a single packet. + const MAX_REQUESTS: usize = 512; + // the maximum amount of gas we'll prove execution of in a single packet. 
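		// (roughly the point beyond which a single proof of execution would no longer
		// fit in one devp2p packet)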
const MAX_GAS: usize = 50_000_000; - use util::Uint; + + use ::request_builder::RequestBuilder; let peers = self.peers.read(); let peer = match peers.get(peer) { Some(peer) => peer, None => { - debug!(target: "les", "Ignoring request from unknown peer"); + debug!(target: "pip", "Ignoring request from unknown peer"); return Ok(()) } }; @@ -1213,68 +662,11 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; - let req = { - let req_rlp = raw.at(1)?; - request::TransactionProof { - at: req_rlp.val_at(0)?, - from: req_rlp.val_at(1)?, - action: if req_rlp.at(2)?.is_empty() { - Action::Create - } else { - Action::Call(req_rlp.val_at(2)?) - }, - gas: ::std::cmp::min(req_rlp.val_at(3)?, MAX_GAS.into()), - gas_price: req_rlp.val_at(4)?, - value: req_rlp.val_at(5)?, - data: req_rlp.val_at(6)?, - } - }; - - // always charge the peer for all the gas. - peer.deduct_max(&self.flow_params, request::Kind::TransactionProof, req.gas.low_u64() as usize)?; - - let response = match self.provider.transaction_proof(req) { - Some(res) => res, - None => vec![], - }; - - let cur_credits = peer.local_credits.current(); - - io.respond(packet::TRANSACTION_PROOF, { - let mut stream = RlpStream::new_list(3); - stream.append(&req_id).append(&cur_credits).begin_list(response.len()); - - for state_item in response { - stream.append(&&state_item[..]); - } - - stream.out() - }); - - Ok(()) + unimplemented!() } - // Receive a response for proof-of-execution. - fn transaction_proof(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - let id_guard = self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw)?; - let raw_proof: Vec = raw.at(2)?.iter() - .map(|rlp| { - let mut db_val = DBValue::new(); - db_val.append_slice(rlp.data()?); - Ok(db_val) - }) - .collect::, ::rlp::DecoderError>>()?; - - let req_id = id_guard.defuse(); - for handler in &self.handlers { - handler.on_transaction_proof(&Ctx { - peer: *peer, - io: io, - proto: self, - }, req_id, &raw_proof); - } - - Ok(()) + fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + unimplemented!() } // Receive a set of transactions to relay. @@ -1286,7 +678,7 @@ impl LightProtocol { .map(|x| x.as_val::()) .collect::>()?; - debug!(target: "les", "Received {} transactions to relay from peer {}", txs.len(), peer); + debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer); for handler in &self.handlers { handler.on_transactions(&Ctx { @@ -1305,11 +697,11 @@ fn punish(peer: PeerId, io: &IoContext, e: Error) { match e.punishment() { Punishment::None => {} Punishment::Disconnect => { - debug!(target: "les", "Disconnecting peer {}: {}", peer, e); + debug!(target: "pip", "Disconnecting peer {}: {}", peer, e); io.disconnect_peer(peer) } Punishment::Disable => { - debug!(target: "les", "Disabling peer {}: {}", peer, e); + debug!(target: "pip", "Disabling peer {}: {}", peer, e); io.disable_peer(peer) } } @@ -1339,112 +731,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), - _ => warn!(target: "les", "received timeout on unknown token {}", timer), - } - } -} - -// Helper for encoding the request to RLP with the given ID. 
-fn encode_request(req: &Request, req_id: usize) -> Vec { - match *req { - Request::Headers(ref headers) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(4); - - match headers.start { - HashOrNumber::Hash(ref hash) => stream.append(hash), - HashOrNumber::Number(ref num) => stream.append(num), - }; - - stream - .append(&headers.max) - .append(&headers.skip) - .append(&headers.reverse); - - stream.out() - } - Request::Bodies(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::Receipts(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.block_hashes.len()); - - for hash in &request.block_hashes { - stream.append(hash); - } - - stream.out() - } - Request::StateProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(4) - .append(&proof_req.block) - .append(&proof_req.key1); - - match proof_req.key2 { - Some(ref key2) => stream.append(key2), - None => stream.append_empty_data(), - }; - - stream.append(&proof_req.from_level); - } - - stream.out() - } - Request::Codes(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.code_requests.len()); - - for code_req in &request.code_requests { - stream.begin_list(2) - .append(&code_req.block_hash) - .append(&code_req.account_key); - } - - stream.out() - } - Request::HeaderProofs(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(request.requests.len()); - - for proof_req in &request.requests { - stream.begin_list(3) - .append(&proof_req.cht_number) - .append(&proof_req.block_number) - .append(&proof_req.from_level); - } - - stream.out() - } - Request::TransactionProof(ref request) => { - let mut stream = RlpStream::new_list(2); - stream.append(&req_id).begin_list(7) - .append(&request.at) - .append(&request.from); - - match request.action { - Action::Create => stream.append_empty_data(), - Action::Call(ref to) => stream.append(to), - }; - - stream - .append(&request.gas) - .append(&request.gas_price) - .append(&request.value) - .append(&request.data); - - stream.out() + _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } } diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 97aa9b431..e3821e05a 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -26,7 +26,7 @@ //! Current default costs are picked completely arbitrarily, not based //! on any empirical timings or mathematical models. -use request; +use request::{self, Request}; use super::packet; use super::error::Error; @@ -34,10 +34,6 @@ use rlp::*; use util::U256; use time::{Duration, SteadyTime}; -/// A request cost specification. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Cost(pub U256, pub U256); - /// Credits value. /// /// Produced and recharged using `FlowParams`. @@ -81,93 +77,43 @@ impl Credits { /// A cost table, mapping requests to base and per-request costs. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct CostTable { - headers: Cost, // cost per header - bodies: Cost, - receipts: Cost, - state_proofs: Cost, - contract_codes: Cost, - header_proofs: Cost, - transaction_proof: Cost, // cost per gas. + base: U256, // cost per packet. + headers: U256, // cost per header + body: U256, + receipts: U256, + account: U256, + storage: U256, + code: U256, + header_proof: U256, + transaction_proof: U256, // cost per gas. } impl Default for CostTable { fn default() -> Self { // arbitrarily chosen constants. CostTable { - headers: Cost(100000.into(), 10000.into()), - bodies: Cost(150000.into(), 15000.into()), - receipts: Cost(50000.into(), 5000.into()), - state_proofs: Cost(250000.into(), 25000.into()), - contract_codes: Cost(200000.into(), 20000.into()), - header_proofs: Cost(150000.into(), 15000.into()), - transaction_proof: Cost(100000.into(), 2.into()), + base: 100000.into(), + headers: 10000.into(), + body: 15000.into(), + receipts: 5000.into(), + account: 25000.into(), + storage: 25000.into(), + code: 20000.into(), + header_proof: 15000.into(), + transaction_proof: 2.into(), } } } impl RlpEncodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - fn append_cost(s: &mut RlpStream, msg_id: u8, cost: &Cost) { - s.begin_list(3) - .append(&msg_id) - .append(&cost.0) - .append(&cost.1); - } - - s.begin_list(7); - - append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); - append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); - append_cost(s, packet::GET_RECEIPTS, &self.receipts); - append_cost(s, packet::GET_PROOFS, &self.state_proofs); - append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); - append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); - append_cost(s, packet::GET_TRANSACTION_PROOF, &self.transaction_proof); + unimplemented!() } } impl RlpDecodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { - let rlp = decoder.as_rlp(); - - let mut headers = None; - let mut bodies = None; - let mut receipts = None; - let mut state_proofs = None; - let mut contract_codes = None; - let mut header_proofs = None; - let mut transaction_proof = None; - - for row in rlp.iter() { - let msg_id: u8 = row.val_at(0)?; - let cost = { - let base = row.val_at(1)?; - let per = row.val_at(2)?; - - Cost(base, per) - }; - - match msg_id { - packet::GET_BLOCK_HEADERS => headers = Some(cost), - packet::GET_BLOCK_BODIES => bodies = Some(cost), - packet::GET_RECEIPTS => receipts = Some(cost), - packet::GET_PROOFS => state_proofs = Some(cost), - packet::GET_CONTRACT_CODES => contract_codes = Some(cost), - packet::GET_HEADER_PROOFS => header_proofs = Some(cost), - packet::GET_TRANSACTION_PROOF => transaction_proof = Some(cost), - _ => return Err(DecoderError::Custom("Unrecognized message in cost table")), - } - } - - Ok(CostTable { - headers: headers.ok_or(DecoderError::Custom("No headers cost specified"))?, - bodies: bodies.ok_or(DecoderError::Custom("No bodies cost specified"))?, - receipts: receipts.ok_or(DecoderError::Custom("No receipts cost specified"))?, - state_proofs: state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))?, - contract_codes: contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))?, - header_proofs: header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))?, - transaction_proof: transaction_proof.ok_or(DecoderError::Custom("No transaction proof gas cost specified"))?, - }) + unimplemented!() } } @@ -192,17 +138,19 @@ impl FlowParams { /// Create 
effectively infinite flow params. pub fn free() -> Self { - let free_cost = Cost(0.into(), 0.into()); + let free_cost: U256 = 0.into(); FlowParams { limit: (!0u64).into(), recharge: 1.into(), costs: CostTable { + base: free_cost.clone(), headers: free_cost.clone(), - bodies: free_cost.clone(), + body: free_cost.clone(), receipts: free_cost.clone(), - state_proofs: free_cost.clone(), - contract_codes: free_cost.clone(), - header_proofs: free_cost.clone(), + account: free_cost.clone(), + storage: free_cost.clone(), + code: free_cost.clone(), + header_proof: free_cost.clone(), transaction_proof: free_cost, } } @@ -219,56 +167,20 @@ impl FlowParams { /// Compute the actual cost of a request, given the kind of request /// and number of requests made. - pub fn compute_cost(&self, kind: request::Kind, amount: usize) -> U256 { - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let amount: U256 = amount.into(); - cost.0 + (amount * cost.1) - } - - /// Compute the maximum number of costs of a specific kind which can be made - /// with the given amount of credits - /// Saturates at `usize::max()`. This is not a problem in practice because - /// this amount of requests is already prohibitively large. - pub fn max_amount(&self, credits: &Credits, kind: request::Kind) -> usize { - use util::Uint; - use std::usize; - - let cost = match kind { - request::Kind::Headers => &self.costs.headers, - request::Kind::Bodies => &self.costs.bodies, - request::Kind::Receipts => &self.costs.receipts, - request::Kind::StateProofs => &self.costs.state_proofs, - request::Kind::Codes => &self.costs.contract_codes, - request::Kind::HeaderProofs => &self.costs.header_proofs, - request::Kind::TransactionProof => &self.costs.transaction_proof, - }; - - let start = credits.current(); - - if start <= cost.0 { - return 0; - } else if cost.1 == U256::zero() { - return usize::MAX; - } - - let max = (start - cost.0) / cost.1; - if max >= usize::MAX.into() { - usize::MAX - } else { - max.as_u64() as usize + pub fn compute_cost(&self, request: &Request) -> U256 { + match *request { + Request::Headers(ref req) => self.costs.headers * req.max.into(), + Request::HeaderProof(_) => self.costs.header_proof, + Request::Body(_) => self.costs.body, + Request::Receipts(_) => self.costs.receipts, + Request::Account(_) => self.costs.account, + Request::Storage(_) => self.costs.storage, + Request::Code(_) => self.costs.code, + Request::Execution(ref req) => self.costs.transaction_proof * req.gas, } } - /// Create initial credits.. + /// Create initial credits. 
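	// For example, with the default `CostTable` above a `Headers` request with `max = 100`
	// is charged 10_000 * 100 = 1_000_000 credit units, and an `Execution` request is
	// charged 2 credit units per unit of gas to be proved.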
pub fn create_credits(&self) -> Credits { Credits { estimate: self.limit, diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index e6d4068da..c329d780f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -89,22 +89,7 @@ impl RequestSet { None => return false, }; - let kind = self.reqs.values() - .next() - .map(|r| r.kind()) - .expect("base time implies `reqs` non-empty; qed"); - - let kind_timeout = match kind { - request::Kind::Headers => timeout::HEADERS, - request::Kind::Bodies => timeout::BODIES, - request::Kind::Receipts => timeout::RECEIPTS, - request::Kind::StateProofs => timeout::PROOFS, - request::Kind::Codes => timeout::CONTRACT_CODES, - request::Kind::HeaderProofs => timeout::HEADER_PROOFS, - request::Kind::TransactionProof => timeout::TRANSACTION_PROOF, - }; - - base + Duration::milliseconds(kind_timeout) <= now + unimplemented!() } /// Collect all pending request ids. diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 4a9a96999..be9239e4d 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -121,7 +121,7 @@ pub trait Provider: Send + Sync { /// Provide a proof-of-execution for the given transaction proof request. /// Returns a vector of all state items necessary to execute the transaction. - fn transaction_proof(&self, req: request::TransactionProof) -> Option>; + fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option; } // Implementation of a light client data provider for a client. @@ -143,12 +143,12 @@ impl Provider for T { } fn block_body(&self, req: request::CompleteBodyRequest) -> Option { - BlockChainClient::block_body(self, id) + BlockChainClient::block_body(self, BlockId::Hash(req.hash)) .map(|body| ::request::BodyResponse { body: body }) } fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { - BlockChainClient::block_receipts(self, hash) + BlockChainClient::block_receipts(self, &req.hash) .map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode(&x) }) } @@ -165,7 +165,7 @@ impl Provider for T { } fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { - self.prove_account(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { + self.prove_storage(req.address_hash, req.key_hash, BlockId::Hash(req.block_hash)).map(|(proof, item) | { ::request::StorageResponse { proof: proof, value: item, @@ -173,7 +173,7 @@ impl Provider for T { }) } - fn contract_code(&self, req: request::ContractCode) -> Option { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { self.state_data(&req.code_hash) .map(|code| ::request::CodeResponse { code: code }) } @@ -239,7 +239,7 @@ impl Provider for T { fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option { use ethcore::transaction::Transaction; - let id = BlockId::Hash(req.at); + let id = BlockId::Hash(req.block_hash); let nonce = match self.nonce(&req.from, id.clone()) { Some(nonce) => nonce, None => return None, @@ -321,7 +321,7 @@ impl Provider for LightProvider { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { None } diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs new file mode 100644 index 000000000..6233075bb --- /dev/null +++ b/ethcore/light/src/request_builder.rs @@ -0,0 +1,116 @@ +// Copyright 
2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Request chain builder utility. +//! Push requests with `push`. Back-references and data required to verify responses must be +//! supplied as well. + +use std::collections::{HashMap, VecDeque}; +use request::{ + IncompleteRequest, CompleteRequest, Request, + Field, OutputKind, Output, NoSuchOutput, Response, +}; + +/// Build chained requests. Push them onto the series with `push`, +/// and produce a `Requests` object with `build`. Outputs are checked for consistency. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct RequestBuilder { + output_kinds: HashMap<(usize, usize), OutputKind>, + requests: Vec, +} + +impl RequestBuilder { + /// Attempt to push a request onto the request chain. Fails if the request + /// references a non-existant output of a prior request. + pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { + request.check_outputs(|req, idx, kind| { + match self.output_kinds.get(&(req, idx)) { + Some(k) if k == &kind => Ok(()), + _ => Err(NoSuchOutput), + } + })?; + let req_idx = self.requests.len(); + request.note_outputs(|idx, kind| { self.output_kinds.insert((req_idx, idx), kind); }); + self.requests.push(request); + Ok(()) + } + + /// Convert this into a "requests" object. + pub fn build(self) -> Requests { + Requests { + output_kinds: self.output_kinds, + outputs: HashMap::new(), + requests: self.requests, + offset: 0, + } + } +} + +/// Requests pending responses. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Requests { + output_kinds: HashMap<(usize, usize), OutputKind>, + outputs: HashMap<(usize, usize), Output>, + requests: Vec, + offset: usize, // offset for splitting. +} + +impl Requests { + /// For each request, produce responses for each. + /// The responses vector produced goes up to the point where the responder + /// first returns `None`, an invalid response, or until all requests have been responded to. + pub fn respond_to_all(mut self, responder: F) -> Vec + where F: Fn(CompleteRequest) -> Option + { + let mut responses = Vec::new(); + let mut found_bad = false; + let offset = self.offset; + let output_kinds = self.output_kinds; + let mut outputs = self.outputs; + for (idx, req) in self.requests.into_iter().enumerate().map(|(idx, req)| (idx + offset, req)) { + let complete = req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .expect("All outputs checked as invariant of `Requests` object; qed"); + + match responder(complete) { + Some(response) => { + response.fill_outputs(|out_idx, output| { + match output_kinds.get(&(idx, out_idx)) { + None => {}, + Some(out) => if out == &output.kind() { + outputs.insert((idx, out_idx), output); + } else { + // output kind doesn't match expected. 
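							// (The offending response is dropped and iteration stops below,
							// since a later request in the chain may back-reference this output.)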
+ found_bad = true; + } + } + }); + + if found_bad { + return responses; + } + + responses.push(response); + } + None => return responses, + } + } + + responses + } + + /// Get access to the underlying slice of requests. + pub fn requests(&self) -> &[Request] { &self.requests } +} diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 7ad16ea4d..a84a37435 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use ethcore::transaction::Action; -use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream}; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Address, H256, U256, Uint}; // re-exports of request types. @@ -65,6 +65,7 @@ pub use self::execution::{ }; /// Error indicating a reference to a non-existent or wrongly-typed output. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; /// An input to a request. @@ -77,7 +78,7 @@ pub enum Field { BackReference(usize, usize), } -impl From for Field { +impl From for Field { fn from(val: T) -> Self { Field::Scalar(val) } @@ -119,7 +120,8 @@ pub enum Output { } impl Output { - fn kind(&self) -> OutputKind { + /// Get the output kind. + pub fn kind(&self) -> OutputKind { match *self { Output::Hash(_) => OutputKind::Hash, Output::Number(_) => OutputKind::Number, @@ -158,6 +160,24 @@ impl From for HashOrNumber { } } +impl Decodable for HashOrNumber { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + rlp.val_at::(0).map(HashOrNumber::Hash) + .or_else(|_| rlp.val_at(0).map(HashOrNumber::Number)) + } +} + +impl Encodable for HashOrNumber { + fn rlp_append(&self, s: &mut RlpStream) { + match *self { + HashOrNumber::Hash(ref hash) => s.append(hash), + HashOrNumber::Number(ref num) => s.append(num), + }; + } +} + /// All request types, as they're sent over the network. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { @@ -305,13 +325,20 @@ impl IncompleteRequest for Request { pub enum Kind { /// A request for headers. Headers = 0, + /// A request for a header proof. HeaderProof = 1, // TransactionIndex = 2, + /// A request for block receipts. Receipts = 3, + /// A request for a block body. Body = 4, + /// A request for an account + merkle proof. Account = 5, + /// A request for contract storage + merkle proof Storage = 6, + /// A request for contract. Code = 7, + /// A request for transaction execution + state proof. Execution = 8, } @@ -336,7 +363,7 @@ impl Decodable for Kind { impl Encodable for Kind { fn rlp_append(&self, s: &mut RlpStream) { - s.append(self as &u8); + s.append(&(*self as u8)); } } @@ -366,14 +393,14 @@ impl Response { /// Fill reusable outputs by writing them into the function. 
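
// A minimal, assumed caller-side sketch of the `request_builder` module added above (not
// part of the patch): it chains a body and a receipts request for one block. The
// `Request::Body` / `Request::Receipts` variants are assumed to wrap the corresponding
// `Incomplete` types. Scalar inputs need no back-references, so `push` cannot fail here;
// a `Field::BackReference(request_index, output_index)` into an earlier request's noted
// outputs would also pass the `check_outputs` consistency check.
fn chain_for_block(block: ::util::H256) -> Result<::request_builder::Requests, ::request::NoSuchOutput> {
	use request::{self, Request};
	use request_builder::RequestBuilder;

	let mut builder = RequestBuilder::default();
	builder.push(Request::Body(request::block_body::Incomplete { hash: block.into() }))?;
	builder.push(Request::Receipts(request::block_receipts::Incomplete { hash: block.into() }))?;
	Ok(builder.build())
}
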
pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { match *self { - Response::Headers(res) => res.fill_outputs(f), - Response::HeaderProof(res) => res.fill_outputs(f), - Response::Receipts(res) => res.fill_outputs(f), - Response::Body(res) => res.fill_outputs(f), - Response::Account(res) => res.fill_outputs(f), - Response::Storage(res) => res.fill_outputs(f), - Response::Code(res) => res.fill_outputs(f), - Response::Execution(res) => res.fill_outputs(f), + Response::Headers(ref res) => res.fill_outputs(f), + Response::HeaderProof(ref res) => res.fill_outputs(f), + Response::Receipts(ref res) => res.fill_outputs(f), + Response::Body(ref res) => res.fill_outputs(f), + Response::Account(ref res) => res.fill_outputs(f), + Response::Storage(ref res) => res.fill_outputs(f), + Response::Code(ref res) => res.fill_outputs(f), + Response::Execution(ref res) => res.fill_outputs(f), } } @@ -386,7 +413,7 @@ impl Response { Response::Account(_) => Kind::Account, Response::Storage(_) => Kind::Storage, Response::Code(_) => Kind::Code, - Respnse::Execution(_) => Kind::Execution, + Response::Execution(_) => Kind::Execution, } } } @@ -403,7 +430,7 @@ impl Decodable for Response { Kind::Account => Ok(Response::Account(rlp.val_at(1)?)), Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)), Kind::Code => Ok(Response::Code(rlp.val_at(1)?)), - Kind::Execution=> Ok(Response::Execution(rlp.val_at(1)?)), + Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)), } } } @@ -427,6 +454,7 @@ impl Encodable for Response { /// A potentially incomplete request. pub trait IncompleteRequest: Sized { + /// The complete variant of this request. type Complete; /// Check prior outputs against the needed inputs. @@ -453,7 +481,6 @@ pub mod header { use super::{Field, HashOrNumber, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::U256; /// Potentially incomplete headers request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -461,9 +488,9 @@ pub mod header { /// Start block. pub start: Field, /// Skip between. - pub skip: U256, + pub skip: u64, /// Maximum to return. - pub max: U256, + pub max: u64, /// Whether to reverse from start. pub reverse: bool, } @@ -499,7 +526,7 @@ pub mod header { match self.start { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => - f(req, idx, OutputKind::Hash).or_else(|| f(req, idx, OutputKind::Number)) + f(req, idx, OutputKind::Hash).or_else(|_| f(req, idx, OutputKind::Number)) } } @@ -532,9 +559,9 @@ pub mod header { /// Start block. pub start: HashOrNumber, /// Skip between. - pub skip: U256, + pub skip: u64, /// Maximum to return. - pub max: U256, + pub max: u64, /// Whether to reverse from start. pub reverse: bool, } @@ -695,7 +722,7 @@ pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::receipt::Receipt; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::H256; /// Potentially incomplete block receipts request. 
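	// (Request inputs such as `hash` below are `Field`s: either a scalar value, or a
	// back-reference `(request index, output index)` into the outputs of an earlier
	// request in the same chain.)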
#[derive(Debug, Clone, PartialEq, Eq)] @@ -725,7 +752,7 @@ pub mod block_receipts { fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> { - match self.num { + match self.hash { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), } @@ -791,7 +818,7 @@ pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::H256; /// Potentially incomplete block body request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -821,7 +848,7 @@ pub mod block_body { fn check_outputs(&self, mut f: F) -> Result<(), NoSuchOutput> where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput> { - match self.num { + match self.hash { Field::Scalar(_) => Ok(()), Field::BackReference(req, idx) => f(req, idx, OutputKind::Hash), } @@ -869,14 +896,14 @@ pub mod block_body { impl Decodable for Response { fn decode(decoder: &D) -> Result where D: Decoder { use ethcore::header::Header as FullHeader; - use ethcore::transaction::SignedTransaction; + use ethcore::transaction::UnverifiedTransaction; let rlp = decoder.as_rlp(); let body_rlp = rlp.at(0)?; // check body validity. let _: Vec = rlp.val_at(0)?; - let _: Vec = rlp.val_at(1)?; + let _: Vec = rlp.val_at(1)?; Ok(Response { body: encoded::Body::new(body_rlp.as_raw().to_owned()), @@ -895,7 +922,6 @@ pub mod block_body { /// A request for an account proof. pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, U256, H256}; @@ -1028,7 +1054,7 @@ pub mod account { .append(&self.nonce) .append(&self.balance) .append(&self.code_hash) - .append(&self.storage_root) + .append(&self.storage_root); } } } @@ -1036,9 +1062,8 @@ pub mod account { /// A request for a storage proof. pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::{Bytes, H256}; /// Potentially incomplete request for an storage proof. #[derive(Debug, Clone, PartialEq, Eq)] @@ -1182,9 +1207,8 @@ pub mod storage { /// A request for contract code. pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; - use util::{Bytes, U256, H256}; + use util::{Bytes, H256}; /// Potentially incomplete contract code request. #[derive(Debug, Clone, PartialEq, Eq)] @@ -1299,7 +1323,6 @@ pub mod contract_code { /// A request for proof of execution. 
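 /// The response carries the state items needed to prove the execution.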
pub mod execution { use super::{Field, NoSuchOutput, OutputKind, Output}; - use ethcore::encoded; use ethcore::transaction::Action; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::{Bytes, Address, U256, H256, DBValue}; @@ -1328,7 +1351,7 @@ pub mod execution { let rlp = decoder.as_rlp(); Ok(Incomplete { block_hash: rlp.val_at(0)?, - address: rlp.val_at(1)?, + from: rlp.val_at(1)?, action: rlp.val_at(2)?, gas: rlp.val_at(3)?, gas_price: rlp.val_at(4)?, @@ -1344,7 +1367,7 @@ pub mod execution { .append(&self.block_hash) .append(&self.from); - match *self.action { + match self.action { Action::Create => s.append_empty_data(), Action::Call(ref addr) => s.append(addr), }; @@ -1432,7 +1455,7 @@ pub mod execution { let mut items = Vec::new(); for raw_item in rlp.at(0)?.iter() { let mut item = DBValue::new(); - item.append_slice(raw_item.data()); + item.append_slice(raw_item.data()?); items.push(item); } @@ -1444,7 +1467,7 @@ pub mod execution { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(&self.items.len()); + s.begin_list(self.items.len()); for item in &self.items { s.append(&&**item); From dbd05e6c92dcce035621b75ac2ab954f57ef3fd8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 7 Mar 2017 20:58:23 +0100 Subject: [PATCH 20/89] handle request packet in LightProtocol --- ethcore/light/src/net/context.rs | 4 ++-- ethcore/light/src/net/error.rs | 4 ++++ ethcore/light/src/net/mod.rs | 37 ++++++++++++++++++++++++++++---- 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 332d497a1..80a829962 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -50,13 +50,13 @@ pub trait IoContext { impl<'a> IoContext for NetworkContext<'a> { fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec) { if let Err(e) = self.send(peer, packet_id, packet_body) { - debug!(target: "les", "Error sending packet to peer {}: {}", peer, e); + debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e); } } fn respond(&self, packet_id: u8, packet_body: Vec) { if let Err(e) = self.respond(packet_id, packet_body) { - debug!(target: "les", "Error responding to peer message: {}", e); + debug!(target: "pip", "Error responding to peer message: {}", e); } } diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs index dda78e0b6..1c0374c7e 100644 --- a/ethcore/light/src/net/error.rs +++ b/ethcore/light/src/net/error.rs @@ -56,6 +56,8 @@ pub enum Error { UnknownPeer, /// Unsolicited response. UnsolicitedResponse, + /// Bad back-reference in request. + BadBackReference, /// Not a server. NotServer, /// Unsupported protocol version. 
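A minimal sketch of where this error comes from, assuming the `RequestBuilder` from the new `request_builder` module (the function name below is illustrative only): `push` rejects any request whose `Field::BackReference(req, idx)` does not match an output of an earlier request in the same packet, and the protocol maps that rejection onto `BadBackReference`.

use request::{Field, Request, IncompleteHeaderProofRequest};
use request_builder::RequestBuilder;

// The very first request in a packet cannot back-reference anything: there is
// no earlier request whose outputs it could point at, so `push` fails and the
// failure is surfaced as `Error::BadBackReference` (punished with `Disable`).
fn rejects_dangling_reference() -> bool {
    let mut builder = RequestBuilder::default();
    builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
        num: Field::BackReference(0, 0),
    })).is_err()
}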
@@ -78,6 +80,7 @@ impl Error { Error::WrongNetwork => Punishment::Disable, Error::UnknownPeer => Punishment::Disconnect, Error::UnsolicitedResponse => Punishment::Disable, + Error::BadBackReference => Punishment::Disable, Error::NotServer => Punishment::Disable, Error::UnsupportedProtocolVersion(_) => Punishment::Disable, Error::BadProtocolVersion => Punishment::Disable, @@ -109,6 +112,7 @@ impl fmt::Display for Error { Error::WrongNetwork => write!(f, "Wrong network"), Error::UnknownPeer => write!(f, "Unknown peer"), Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"), + Error::BadBackReference => write!(f, "Bad back-reference in request."), Error::NotServer => write!(f, "Peer not a server."), Error::UnsupportedProtocolVersion(pv) => write!(f, "Unsupported protocol version: {}", pv), Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"), diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 1b2433fbe..8363fcfe7 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -642,13 +642,13 @@ impl LightProtocol { Ok(()) } + // Receive requests from a peer. fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { // the maximum amount of requests we'll fill in a single packet. - const MAX_REQUESTS: usize = 512; - // the maximum amount of gas we'll prove execution of in a single packet. - const MAX_GAS: usize = 50_000_000; + const MAX_REQUESTS: usize = 256; use ::request_builder::RequestBuilder; + use ::request::CompleteRequest; let peers = self.peers.read(); let peer = match peers.get(peer) { @@ -661,8 +661,37 @@ impl LightProtocol { let mut peer = peer.lock(); let req_id: u64 = raw.val_at(0)?; + let mut cumulative_cost = U256::from(0); + let cur_buffer = peer.local_credits.current(); - unimplemented!() + let mut request_builder = RequestBuilder::default(); + + // deserialize requests, check costs and back-references. + for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { + let request: Request = request_rlp.as_val()?; + cumulative_cost = cumulative_cost + self.flow_params.compute_cost(&request); + if cumulative_cost > cur_buffer { return Err(Error::NoCredits) } + request_builder.push(request).map_err(|_| Error::BadBackReference)?; + } + + let requests = request_builder.build(); + + // respond to all requests until one fails. 
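+ // `respond_to_all` completes each request in turn, filling its back-references
+ // from the outputs of earlier responses, and stops at the first request the
+ // provider cannot answer, so the response list may be shorter than the request list.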
+ let responses = requests.respond_to_all(|complete_req| { + match complete_req { + CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers), + CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof), + CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body), + CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts), + CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account), + CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage), + CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code), + CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution), + } + }); + + io.respond(packet::RESPONSE, ::rlp::encode(&responses).to_vec()); + Ok(()) } fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { From ee034185a55858da6958c0c01839c45a5c3ec8d1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 15:28:46 +0100 Subject: [PATCH 21/89] handle response packets --- ethcore/light/src/net/mod.rs | 40 +++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 8363fcfe7..b6f514371 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -167,7 +167,9 @@ pub trait Handler: Send + Sync { /// Called when a peer requests relay of some transactions. fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { } /// Called when a peer responds to requests. - fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _relay: &[Response]) { } + /// Responses not guaranteed to contain valid data and are not yet checked against + /// the requests they correspond to. + fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { } /// Called when a peer responds with a transaction proof. Each proof is a vector of state items. fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { } /// Called to "tick" the handler periodically. @@ -380,11 +382,11 @@ impl LightProtocol { // - check whether peer exists // - check whether request was made // - check whether request kinds match - fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result { + fn pre_verify_response(&self, peer: &PeerId, raw: &UntrustedRlp) -> Result { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response from peer {}, kind={:?}", peer, kind); + trace!(target: "pip", "pre-verifying response from peer {}", peer); let peers = self.peers.read(); let res = match peers.get(peer) { @@ -394,7 +396,7 @@ impl LightProtocol { let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { - (Some(request), Some(flow_info)) => { + (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); c.update_to(actual_credits); @@ -662,15 +664,13 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; let mut cumulative_cost = U256::from(0); - let cur_buffer = peer.local_credits.current(); let mut request_builder = RequestBuilder::default(); - // deserialize requests, check costs and back-references. 
+ // deserialize requests, check costs and request validity. for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; - cumulative_cost = cumulative_cost + self.flow_params.compute_cost(&request); - if cumulative_cost > cur_buffer { return Err(Error::NoCredits) } + peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; request_builder.push(request).map_err(|_| Error::BadBackReference)?; } @@ -690,12 +690,32 @@ impl LightProtocol { } }); - io.respond(packet::RESPONSE, ::rlp::encode(&responses).to_vec()); + io.respond(packet::RESPONSE, { + let mut stream = RlpStream::new_list(3); + let cur_credits = peer.local_credits.current(); + stream.append(&req_id).append(&cur_credits).append(&responses); + stream.out() + }); Ok(()) } + // handle a packet with responses. fn response(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { - unimplemented!() + let (req_id, responses) = { + let id_guard = self.pre_verify_response(peer, &raw)?; + let responses: Vec = raw.val_at(2)?; + (id_guard.defuse(), responses) + }; + + for handler in &self.handlers { + handler.on_responses(&Ctx { + io: io, + proto: self, + peer: *peer, + }, req_id, &responses); + } + + Ok(()) } // Receive a set of transactions to relay. From bb39f104f46f3d79b85f197e2cd6f4c31b541fc1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 17:37:07 +0100 Subject: [PATCH 22/89] implement requesting from --- ethcore/light/src/net/context.rs | 16 ++++-- ethcore/light/src/net/mod.rs | 75 +++++++++++++++++----------- ethcore/light/src/net/request_set.rs | 30 ++++++++--- ethcore/light/src/provider.rs | 14 +++--- ethcore/light/src/request_builder.rs | 6 +-- ethcore/light/src/types/request.rs | 7 +-- 6 files changed, 92 insertions(+), 56 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 80a829962..659c117af 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -21,6 +21,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; use request::{self, Request}; +use request_builder::Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. This is used as a generalization of the portions @@ -83,7 +84,12 @@ pub trait BasicContext { fn persistent_peer_id(&self, peer: PeerId) -> Option; /// Make a request from a peer. - fn request_from(&self, peer: PeerId, request: Request) -> Result; + /// + /// Fails on: nonexistent peer, network error, peer not server, + /// insufficient credits. Does not check capabilities before sending. + /// On success, returns a request id which can later be coordinated + /// with an event. + fn request_from(&self, peer: PeerId, request: Requests) -> Result; /// Make an announcement of new capabilities to the rest of the peers. // TODO: maybe just put this on a timer in LightProtocol? 
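To make the new signature concrete, here is a minimal sketch of a caller batching two requests into a single `Requests` set before handing them to `request_from`. The function name, `ctx`, `peer`, `block_hash` and the block number are illustrative; `RequestBuilder`, the request types and `request_from` are the ones introduced in this series, and imports are elided.

fn fetch_proof_and_receipts<T: BasicContext>(ctx: &T, peer: PeerId, block_hash: H256) -> Result<ReqId, Error> {
    let mut builder = RequestBuilder::default();
    builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
        num: 1_000u64.into(), // scalar block number
    })).expect("scalar fields never fail the back-reference check");
    builder.push(Request::Receipts(IncompleteReceiptsRequest {
        hash: block_hash.into(), // scalar block hash
    })).expect("scalar fields never fail the back-reference check");
    // one packet, one req_id; credits for the whole set are checked before sending.
    ctx.request_from(peer, builder.build())
}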
@@ -119,8 +125,8 @@ impl<'a> BasicContext for TickCtx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { @@ -152,8 +158,8 @@ impl<'a> BasicContext for Ctx<'a> { self.io.persistent_peer_id(id) } - fn request_from(&self, peer: PeerId, request: Request) -> Result { - self.proto.request_from(self.io, &peer, request) + fn request_from(&self, peer: PeerId, requests: Requests) -> Result { + self.proto.request_from(self.io, &peer, requests) } fn make_announcement(&self, announcement: Announcement) { diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index b6f514371..57459ec01 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -35,6 +35,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; use request::{self, HashOrNumber, Request, Response}; +use request_builder::Requests; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -71,8 +72,8 @@ pub const PROTOCOL_VERSIONS: &'static [u8] = &[1]; /// Max protocol version. pub const MAX_PROTOCOL_VERSION: u8 = 1; -/// Packet count for LES. -pub const PACKET_COUNT: u8 = 17; +/// Packet count for PIP. +pub const PACKET_COUNT: u8 = 5; // packet ID definitions. mod packet { @@ -88,24 +89,21 @@ mod packet { // relay transactions to peers. pub const SEND_TRANSACTIONS: u8 = 0x04; - - // request and response for transaction proof. - // TODO: merge with request/response. - pub const GET_TRANSACTION_PROOF: u8 = 0x05; - pub const TRANSACTION_PROOF: u8 = 0x06; } // timeouts for different kinds of requests. all values are in milliseconds. -// TODO: variable timeouts based on request count. mod timeout { pub const HANDSHAKE: i64 = 2500; - pub const HEADERS: i64 = 2500; - pub const BODIES: i64 = 5000; - pub const RECEIPTS: i64 = 3500; - pub const PROOFS: i64 = 4000; - pub const CONTRACT_CODES: i64 = 5000; - pub const HEADER_PROOFS: i64 = 3500; - pub const TRANSACTION_PROOF: i64 = 5000; + pub const BASE: i64 = 1500; // base timeout for packet. + + // timeouts per request within packet. + pub const HEADERS: i64 = 250; // per header? + pub const BODY: i64 = 50; + pub const RECEIPT: i64 = 50; + pub const PROOF: i64 = 100; // state proof + pub const CONTRACT_CODE: i64 = 100; + pub const HEADER_PROOF: i64 = 100; + pub const TRANSACTION_PROOF: i64 = 1000; // per gas? } /// A request id. @@ -138,16 +136,7 @@ pub struct Peer { failed_requests: Vec, } -impl Peer { - // refund credits for a request. returns new amount of credits. - fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 { - flow_params.refund(&mut self.local_credits, amount); - - self.local_credits.current() - } -} - -/// An LES event handler. +/// A light protocol event handler. /// /// Each handler function takes a context which describes the relevant peer /// and gives references to the IO layer and protocol structure so new messages @@ -304,9 +293,37 @@ impl LightProtocol { /// insufficient credits. Does not check capabilities before sending. /// On success, returns a request id which can later be coordinated /// with an event. - // TODO: pass `Requests`. 
- pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result { - unimplemented!() + pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, requests: Requests) -> Result { + let peers = self.peers.read(); + let peer = match peers.get(peer_id) { + Some(peer) => peer, + None => return Err(Error::UnknownPeer), + }; + + let mut peer = peer.lock(); + let peer = &mut *peer; + match peer.remote_flow { + None => Err(Error::NotServer), + Some((ref mut creds, ref params)) => { + // check that enough credits are available. + let mut temp_creds: Credits = creds.clone(); + for request in requests.requests() { + temp_creds.deduct_cost(params.compute_cost(request))?; + } + *creds = temp_creds; + + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); + io.send(*peer_id, packet::REQUEST, { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id.0).append(&requests.requests()); + stream.out() + }); + + // begin timeout. + peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + Ok(req_id) + } + } } /// Make an announcement of new chain head and capabilities to all peers. @@ -663,8 +680,6 @@ impl LightProtocol { let mut peer = peer.lock(); let req_id: u64 = raw.val_at(0)?; - let mut cumulative_cost = U256::from(0); - let mut request_builder = RequestBuilder::default(); // deserialize requests, check costs and request validity. diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index c329d780f..eefc6dfd5 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -25,6 +25,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; use request::{self, Request}; +use request_builder::Requests; use net::{timeout, ReqId}; use time::{Duration, SteadyTime}; @@ -35,7 +36,7 @@ pub struct RequestSet { counter: u64, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { @@ -50,8 +51,8 @@ impl Default for RequestSet { } impl RequestSet { - /// Push a request onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Request, now: SteadyTime) { + /// Push requests onto the stack. + pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { let counter = self.counter; self.ids.insert(req_id, counter); self.reqs.insert(counter, req); @@ -63,8 +64,8 @@ impl RequestSet { self.counter += 1; } - /// Remove a request from the stack. - pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { + /// Remove a set of requests from the stack. + pub fn remove(&mut self, req_id: &ReqId, now: SteadyTime) -> Option { let id = match self.ids.remove(&req_id) { Some(id) => id, None => return None, @@ -89,7 +90,24 @@ impl RequestSet { None => return false, }; - unimplemented!() + let first_req = self.reqs.values().next() + .expect("base existing implies `reqs` non-empty; qed"); + + // timeout is a base + value per request contained within. + let timeout = first_req.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + } + }); + + base + Duration::milliseconds(timeout) <= now } /// Collect all pending request ids. 
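As a worked example of the timeout arithmetic above: a set holding one headers request and two body requests gets BASE + HEADERS + 2 * BODY = 1500 + 250 + 100 = 1850 ms before it counts as timed out. The same sum written out as a sketch, assuming access to the module-private `timeout` constants in `net/mod.rs`:

// Mirrors the fold in `check_timeout` above for one headers request plus two bodies.
fn example_timeout_ms() -> i64 {
    timeout::BASE + timeout::HEADERS + 2 * timeout::BODY // 1500 + 250 + 100 = 1850
}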
diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index be9239e4d..f6ffded82 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,7 +24,7 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{Bytes, DBValue, RwLock, H256}; +use util::{RwLock, H256}; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; @@ -297,27 +297,27 @@ impl Provider for LightProvider { self.client.as_light_client().block_header(id) } - fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + fn block_body(&self, _req: request::CompleteBodyRequest) -> Option { None } - fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + fn block_receipts(&self, _req: request::CompleteReceiptsRequest) -> Option { None } - fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + fn account_proof(&self, _req: request::CompleteAccountRequest) -> Option { None } - fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + fn storage_proof(&self, _req: request::CompleteStorageRequest) -> Option { None } - fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option { None } - fn header_proof(&self, req: request::CompleteHeaderProofRequest) -> Option { + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { None } diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs index 6233075bb..3533026a5 100644 --- a/ethcore/light/src/request_builder.rs +++ b/ethcore/light/src/request_builder.rs @@ -18,10 +18,10 @@ //! Push requests with `push`. Back-references and data required to verify responses must be //! supplied as well. -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; use request::{ IncompleteRequest, CompleteRequest, Request, - Field, OutputKind, Output, NoSuchOutput, Response, + OutputKind, Output, NoSuchOutput, Response, }; /// Build chained requests. Push them onto the series with `push`, @@ -72,7 +72,7 @@ impl Requests { /// For each request, produce responses for each. /// The responses vector produced goes up to the point where the responder /// first returns `None`, an invalid response, or until all requests have been responded to. - pub fn respond_to_all(mut self, responder: F) -> Vec + pub fn respond_to_all(self, responder: F) -> Vec where F: Fn(CompleteRequest) -> Option { let mut responses = Vec::new(); diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index a84a37435..d6893b0e1 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -16,11 +16,8 @@ //! Light protocol request types. -use std::collections::HashMap; - -use ethcore::transaction::Action; use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; -use util::{Address, H256, U256, Uint}; +use util::H256; // re-exports of request types. pub use self::header::{ @@ -391,7 +388,7 @@ pub enum Response { impl Response { /// Fill reusable outputs by writing them into the function. 
- pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { + pub fn fill_outputs(&self, f: F) where F: FnMut(usize, Output) { match *self { Response::Headers(ref res) => res.fill_outputs(f), Response::HeaderProof(ref res) => res.fill_outputs(f), From 969261695831236b6eddc30b2af28e9714e0d2a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:01:41 +0100 Subject: [PATCH 23/89] re-do cost table --- ethcore/light/src/client/mod.rs | 2 +- ethcore/light/src/net/context.rs | 1 - ethcore/light/src/net/mod.rs | 11 +++-- ethcore/light/src/net/request_credits.rs | 55 ++++++++++++++++++++++-- ethcore/light/src/net/request_set.rs | 2 +- 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 34f7ed990..c791caed1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{Bytes, DBValue, H256, Mutex, RwLock}; +use util::{H256, Mutex, RwLock}; use self::header_chain::{AncestryIter, HeaderChain}; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 659c117af..513388a92 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -20,7 +20,6 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request::{self, Request}; use request_builder::Requests; /// An I/O context which allows sending and receiving packets as well as diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 57459ec01..0241cf3f1 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -18,14 +18,13 @@ //! //! This uses a "Provider" to answer requests. -use ethcore::transaction::{Action, UnverifiedTransaction}; -use ethcore::receipt::Receipt; +use ethcore::transaction::UnverifiedTransaction; use io::TimerToken; use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, Stream, UntrustedRlp, View}; use util::hash::H256; -use util::{Bytes, DBValue, Mutex, RwLock, U256}; +use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; use std::collections::HashMap; @@ -34,7 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{self, HashOrNumber, Request, Response}; +use request::{Request, Response}; use request_builder::Requests; use self::request_credits::{Credits, FlowParams}; @@ -48,8 +47,8 @@ mod error; mod status; mod request_set; -#[cfg(test)] -mod tests; +// #[cfg(test)] +// mod tests; pub mod request_credits; diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index e3821e05a..abeb7e569 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -27,7 +27,6 @@ //! on any empirical timings or mathematical models. 
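 //! Cost tables are exchanged during the handshake; on the wire a table is a
 //! base cost followed by one `[kind, cost]` pair per request type, as encoded below.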
use request::{self, Request}; -use super::packet; use super::error::Error; use rlp::*; @@ -107,13 +106,63 @@ impl Default for CostTable { impl RlpEncodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { - unimplemented!() + fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { + s.begin_list(2).append(&kind).append(cost); + } + + s.begin_list(9).append(&self.base); + append_cost(s, &self.headers, request::Kind::Headers); + append_cost(s, &self.body, request::Kind::Body); + append_cost(s, &self.receipts, request::Kind::Receipts); + append_cost(s, &self.account, request::Kind::Account); + append_cost(s, &self.storage, request::Kind::Storage); + append_cost(s, &self.code, request::Kind::Code); + append_cost(s, &self.header_proof, request::Kind::HeaderProof); + append_cost(s, &self.transaction_proof, request::Kind::Execution); } } impl RlpDecodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { - unimplemented!() + let rlp = decoder.as_rlp(); + let base = rlp.val_at(0)?; + + let mut headers = None; + let mut body = None; + let mut receipts = None; + let mut account = None; + let mut storage = None; + let mut code = None; + let mut header_proof = None; + let mut transaction_proof = None; + + for cost_list in rlp.iter().skip(1) { + let cost = cost_list.val_at(1)?; + match cost_list.val_at(0)? { + request::Kind::Headers => headers = Some(cost), + request::Kind::Body => body = Some(cost), + request::Kind::Receipts => receipts = Some(cost), + request::Kind::Account => account = Some(cost), + request::Kind::Storage => storage = Some(cost), + request::Kind::Code => code = Some(cost), + request::Kind::HeaderProof => header_proof = Some(cost), + request::Kind::Execution => transaction_proof = Some(cost), + } + } + + let unwrap_cost = |cost: Option| cost.ok_or(DecoderError::Custom("Not all costs specified in cost table.")); + + Ok(CostTable { + base: base, + headers: unwrap_cost(headers)?, + body: unwrap_cost(body)?, + receipts: unwrap_cost(receipts)?, + account: unwrap_cost(account)?, + storage: unwrap_cost(storage)?, + code: unwrap_cost(code)?, + header_proof: unwrap_cost(header_proof)?, + transaction_proof: unwrap_cost(transaction_proof)?, + }) } } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index eefc6dfd5..8405b8c89 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -24,7 +24,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; -use request::{self, Request}; +use request::Request; use request_builder::Requests; use net::{timeout, ReqId}; From 9268a1f59cf134e20c0fbac5495404732aaa920e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:27:29 +0100 Subject: [PATCH 24/89] get tests compiling --- ethcore/light/src/net/request_set.rs | 44 +++++++++++++++------------- ethcore/light/src/provider.rs | 6 ++-- ethcore/light/src/types/request.rs | 2 +- 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index 8405b8c89..f66b44f6e 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -93,21 +93,7 @@ impl RequestSet { let first_req = self.reqs.values().next() .expect("base existing implies `reqs` non-empty; qed"); - // timeout is a base + value per request contained within. 
- let timeout = first_req.requests().iter().fold(timeout::BASE, |tm, req| { - tm + match *req { - Request::Headers(_) => timeout::HEADERS, - Request::HeaderProof(_) => timeout::HEADER_PROOF, - Request::Receipts(_) => timeout::RECEIPT, - Request::Body(_) => timeout::BODY, - Request::Account(_) => timeout::PROOF, - Request::Storage(_) => timeout::PROOF, - Request::Code(_) => timeout::CONTRACT_CODE, - Request::Execution(_) => timeout::TRANSACTION_PROOF, - } - }); - - base + Duration::milliseconds(timeout) <= now + base + compute_timeout(&first_req) <= now } /// Collect all pending request ids. @@ -124,25 +110,43 @@ impl RequestSet { pub fn is_empty(&self) -> bool { self.len() == 0 } } +// helper to calculate timeout for a specific set of requests. +// it's a base amount + some amount per request. +fn compute_timeout(reqs: &Requests) -> Duration { + Duration::milliseconds(reqs.requests().iter().fold(timeout::BASE, |tm, req| { + tm + match *req { + Request::Headers(_) => timeout::HEADERS, + Request::HeaderProof(_) => timeout::HEADER_PROOF, + Request::Receipts(_) => timeout::RECEIPT, + Request::Body(_) => timeout::BODY, + Request::Account(_) => timeout::PROOF, + Request::Storage(_) => timeout::PROOF, + Request::Code(_) => timeout::CONTRACT_CODE, + Request::Execution(_) => timeout::TRANSACTION_PROOF, + } + })) +} + #[cfg(test)] mod tests { - use net::{timeout, ReqId}; - use request::{Request, Receipts}; + use net::ReqId; + use request_builder::RequestBuilder; use time::{SteadyTime, Duration}; - use super::RequestSet; + use super::{RequestSet, compute_timeout}; #[test] fn multi_timeout() { let test_begin = SteadyTime::now(); let mut req_set = RequestSet::default(); - let the_req = Request::Receipts(Receipts { block_hashes: Vec::new() }); + let the_req = RequestBuilder::default().build(); + let req_time = compute_timeout(&the_req); req_set.insert(ReqId(0), the_req.clone(), test_begin); req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); - let test_end = test_begin + Duration::milliseconds(timeout::RECEIPTS); + let test_end = test_begin + req_time; assert!(req_set.check_timeout(test_end)); req_set.remove(&ReqId(0), test_begin + Duration::seconds(1)).unwrap(); diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index f6ffded82..36653fe4d 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -349,10 +349,8 @@ mod tests { let client = TestBlockChainClient::new(); client.add_blocks(2000, EachBlockWith::Nothing); - let req = ::request::HeaderProof { - cht_number: 0, - block_number: 1500, - from_level: 0, + let req = ::request::CompleteHeaderProofRequest { + num: 1500, }; assert!(client.header_proof(req.clone()).is_none()); diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index d6893b0e1..42aab4e20 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -346,7 +346,7 @@ impl Decodable for Kind { match rlp.as_val::()? 
{ 0 => Ok(Kind::Headers), 1 => Ok(Kind::HeaderProof), - // 2 => Ok(Kind::TransactionIndex, + // 2 => Ok(Kind::TransactionIndex), 3 => Ok(Kind::Receipts), 4 => Ok(Kind::Body), 5 => Ok(Kind::Account), From a1186727af700a879df17850abb46ec0f4d86e65 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 18:38:25 +0100 Subject: [PATCH 25/89] fix cost table RLP encoding --- ethcore/light/src/net/request_credits.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index abeb7e569..4f7f8a6a3 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -104,10 +104,14 @@ impl Default for CostTable { } } -impl RlpEncodable for CostTable { +impl Encodable for CostTable { fn rlp_append(&self, s: &mut RlpStream) { fn append_cost(s: &mut RlpStream, cost: &U256, kind: request::Kind) { - s.begin_list(2).append(&kind).append(cost); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&kind, s); + s.append(cost); } s.begin_list(9).append(&self.base); @@ -122,7 +126,7 @@ impl RlpEncodable for CostTable { } } -impl RlpDecodable for CostTable { +impl Decodable for CostTable { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); let base = rlp.val_at(0)?; From d9087dd2b6e55d9fc2ebb5ea1470a045fa869108 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 19:50:26 +0100 Subject: [PATCH 26/89] roundtrip tests for request types --- ethcore/light/src/types/request.rs | 237 +++++++++++++++++++++++++++-- 1 file changed, 223 insertions(+), 14 deletions(-) diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 42aab4e20..880bb99f2 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -100,10 +100,13 @@ impl Encodable for Field { fn rlp_append(&self, s: &mut RlpStream) { s.begin_list(2); match *self { - Field::Scalar(ref data) => s.append(&0u8).append(data), - Field::BackReference(ref req, ref idx) => - s.append(&1u8).begin_list(2).append(req).append(idx), - }; + Field::Scalar(ref data) => { + s.append(&0u8).append(data); + } + Field::BackReference(ref req, ref idx) => { + s.append(&1u8).begin_list(2).append(req).append(idx); + } + } } } @@ -161,8 +164,8 @@ impl Decodable for HashOrNumber { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); - rlp.val_at::(0).map(HashOrNumber::Hash) - .or_else(|_| rlp.val_at(0).map(HashOrNumber::Number)) + rlp.as_val::().map(HashOrNumber::Hash) + .or_else(|_| rlp.as_val().map(HashOrNumber::Number)) } } @@ -582,7 +585,7 @@ pub mod header { let mut headers = Vec::new(); - for item in rlp.at(0)?.iter() { + for item in rlp.iter() { // check that it's a valid encoding. // TODO: just return full headers here? let _: FullHeader = item.as_val()?; @@ -798,7 +801,7 @@ pub mod block_receipts { let rlp = decoder.as_rlp(); Ok(Response { - receipts: rlp.val_at(0)?, + receipts: rlp.as_val()?, }) } } @@ -896,22 +899,20 @@ pub mod block_body { use ethcore::transaction::UnverifiedTransaction; let rlp = decoder.as_rlp(); - let body_rlp = rlp.at(0)?; // check body validity. 
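 // decoding into the full header and transaction types (and discarding them)
 // verifies the RLP is well-formed before the raw body bytes are stored as-is.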
let _: Vec = rlp.val_at(0)?; let _: Vec = rlp.val_at(1)?; Ok(Response { - body: encoded::Body::new(body_rlp.as_raw().to_owned()), + body: encoded::Body::new(rlp.as_raw().to_owned()), }) } } impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2) - .append_raw(&self.body.rlp().as_raw(), 2); + s.append_raw(&self.body.rlp().as_raw(), 2); } } } @@ -1305,7 +1306,7 @@ pub mod contract_code { let rlp = decoder.as_rlp(); Ok(Response { - code: rlp.val_at(0)?, + code: rlp.as_val()?, }) } } @@ -1450,7 +1451,7 @@ pub mod execution { fn decode(decoder: &D) -> Result where D: Decoder { let rlp = decoder.as_rlp(); let mut items = Vec::new(); - for raw_item in rlp.at(0)?.iter() { + for raw_item in rlp.iter() { let mut item = DBValue::new(); item.append_slice(raw_item.data()?); items.push(item); @@ -1472,3 +1473,211 @@ pub mod execution { } } } + +#[cfg(test)] +mod tests { + use super::*; + use ethcore::header::Header; + + fn check_roundtrip(val: T) + where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug + { + let bytes = ::rlp::encode(&val); + let new_val: T = ::rlp::decode(&bytes); + assert_eq!(val, new_val); + } + + #[test] + fn hash_or_number_roundtrip() { + let hash = HashOrNumber::Hash(H256::default()); + let number = HashOrNumber::Number(5); + + check_roundtrip(hash); + check_roundtrip(number); + } + + #[test] + fn field_roundtrip() { + let field_scalar = Field::Scalar(5usize); + let field_back: Field = Field::BackReference(1, 2); + + check_roundtrip(field_scalar); + check_roundtrip(field_back); + } + + #[test] + fn headers_roundtrip() { + let req = IncompleteHeadersRequest { + start: Field::Scalar(5u64.into()), + skip: 0, + max: 100, + reverse: false, + }; + + let full_req = Request::Headers(req.clone()); + let res = HeadersResponse { + headers: vec![ + ::ethcore::encoded::Header::new(::rlp::encode(&Header::default()).to_vec()) + ] + }; + let full_res = Response::Headers(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn header_proof_roundtrip() { + let req = IncompleteHeaderProofRequest { + num: Field::BackReference(1, 234), + }; + + let full_req = Request::HeaderProof(req.clone()); + let res = HeaderProofResponse { + proof: Vec::new(), + hash: Default::default(), + td: 100.into(), + }; + let full_res = Response::HeaderProof(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn receipts_roundtrip() { + let req = IncompleteReceiptsRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Receipts(req.clone()); + let res = ReceiptsResponse { + receipts: vec![Default::default(), Default::default()], + }; + let full_res = Response::Receipts(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn body_roundtrip() { + let req = IncompleteBodyRequest { + hash: Field::Scalar(Default::default()), + }; + + let full_req = Request::Body(req.clone()); + let res = BodyResponse { + body: { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + ::ethcore::encoded::Body::new(stream.out()) + }, + }; + let full_res = Response::Body(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn account_roundtrip() { + let req = IncompleteAccountRequest { + 
block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + }; + + let full_req = Request::Account(req.clone()); + let res = AccountResponse { + proof: Vec::new(), + nonce: 100.into(), + balance: 123456.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }; + let full_res = Response::Account(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn storage_roundtrip() { + let req = IncompleteStorageRequest { + block_hash: Field::Scalar(Default::default()), + address_hash: Field::BackReference(1, 2), + key_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Storage(req.clone()); + let res = StorageResponse { + proof: Vec::new(), + value: H256::default(), + }; + let full_res = Response::Storage(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn code_roundtrip() { + let req = IncompleteCodeRequest { + block_hash: Field::Scalar(Default::default()), + code_hash: Field::BackReference(3, 2), + }; + + let full_req = Request::Code(req.clone()); + let res = CodeResponse { + code: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4], + }; + let full_res = Response::Code(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } + + #[test] + fn execution_roundtrip() { + use util::DBValue; + + let req = IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }; + + let full_req = Request::Execution(req.clone()); + let res = ExecutionResponse { + items: vec![DBValue::new(), { + let mut value = DBValue::new(); + value.append_slice(&[1, 1, 1, 2, 3]); + value + }], + }; + let full_res = Response::Execution(res.clone()); + + check_roundtrip(req); + check_roundtrip(full_req); + check_roundtrip(res); + check_roundtrip(full_res); + } +} From aea9b1d6ccdb5a093094ba0c6f1b0aa188001da1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 20:07:45 +0100 Subject: [PATCH 27/89] request builder tests --- ethcore/light/src/request_builder.rs | 50 ++++++++++++++++++++++++++++ ethcore/light/src/types/request.rs | 4 +-- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/request_builder.rs index 3533026a5..867bb6dcc 100644 --- a/ethcore/light/src/request_builder.rs +++ b/ethcore/light/src/request_builder.rs @@ -114,3 +114,53 @@ impl Requests { /// Get access to the underlying slice of requests. 
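 /// Requests are kept in the order they were pushed, which is also the order
 /// in which they are sent and answered.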
pub fn requests(&self) -> &[Request] { &self.requests } } + +#[cfg(test)] +mod tests { + use request::*; + use super::RequestBuilder; + use util::H256; + + #[test] + fn all_scalar() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: H256::default().into(), + })).unwrap(); + } + + #[test] + #[should_panic] + fn missing_backref() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(100, 3), + })).unwrap(); + } + + #[test] + #[should_panic] + fn wrong_kind() { + let mut builder = RequestBuilder::default(); + assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), + })).is_ok()); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: Field::BackReference(0, 0), + })).unwrap(); + } + + #[test] + fn good_backreference() { + let mut builder = RequestBuilder::default(); + builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { + num: 100.into(), // header proof puts hash at output 0. + })).unwrap(); + builder.push(Request::Receipts(IncompleteReceiptsRequest { + hash: Field::BackReference(0, 0), + })).unwrap(); + } +} diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request.rs index 880bb99f2..1a1276951 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request.rs @@ -649,7 +649,7 @@ pub mod header_proof { } fn note_outputs(&self, mut note: F) where F: FnMut(usize, OutputKind) { - note(1, OutputKind::Hash); + note(0, OutputKind::Hash); } fn fill(self, oracle: F) -> Result @@ -691,7 +691,7 @@ pub mod header_proof { impl Response { /// Fill reusable outputs by providing them to the function. pub fn fill_outputs(&self, mut f: F) where F: FnMut(usize, Output) { - f(1, Output::Hash(self.hash)); + f(0, Output::Hash(self.hash)); } } From 8fb0a2d417d82eefd9f5b8358088ea72d5c0bed4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 8 Mar 2017 20:11:22 +0100 Subject: [PATCH 28/89] move request_builder -> request::builder --- ethcore/light/src/lib.rs | 1 - ethcore/light/src/net/context.rs | 2 +- ethcore/light/src/net/mod.rs | 5 ++--- ethcore/light/src/net/request_set.rs | 4 ++-- .../src/{request_builder.rs => types/request/builder.rs} | 0 ethcore/light/src/types/{request.rs => request/mod.rs} | 4 ++++ 6 files changed, 9 insertions(+), 7 deletions(-) rename ethcore/light/src/{request_builder.rs => types/request/builder.rs} (100%) rename ethcore/light/src/types/{request.rs => request/mod.rs} (99%) diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index b15c85242..81a974192 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -38,7 +38,6 @@ pub mod net; //pub mod on_demand; pub mod transaction_queue; pub mod cache; -pub mod request_builder; #[cfg(not(feature = "ipc"))] pub mod provider; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 513388a92..9eafead57 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId}; use super::{Announcement, LightProtocol, ReqId}; use super::error::Error; -use request_builder::Requests; +use request::Requests; /// An I/O context which allows sending and receiving packets as well as /// disconnecting peers. 
This is used as a generalization of the portions diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 0241cf3f1..7929f7b43 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -33,8 +33,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use provider::Provider; -use request::{Request, Response}; -use request_builder::Requests; +use request::{Request, Requests, Response}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; @@ -665,7 +664,7 @@ impl LightProtocol { // the maximum amount of requests we'll fill in a single packet. const MAX_REQUESTS: usize = 256; - use ::request_builder::RequestBuilder; + use ::request::RequestBuilder; use ::request::CompleteRequest; let peers = self.peers.read(); diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index f66b44f6e..a2391ef6f 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -25,7 +25,7 @@ use std::collections::{BTreeMap, HashMap}; use std::iter::FromIterator; use request::Request; -use request_builder::Requests; +use request::Requests; use net::{timeout, ReqId}; use time::{Duration, SteadyTime}; @@ -130,7 +130,7 @@ fn compute_timeout(reqs: &Requests) -> Duration { #[cfg(test)] mod tests { use net::ReqId; - use request_builder::RequestBuilder; + use request::RequestBuilder; use time::{SteadyTime, Duration}; use super::{RequestSet, compute_timeout}; diff --git a/ethcore/light/src/request_builder.rs b/ethcore/light/src/types/request/builder.rs similarity index 100% rename from ethcore/light/src/request_builder.rs rename to ethcore/light/src/types/request/builder.rs diff --git a/ethcore/light/src/types/request.rs b/ethcore/light/src/types/request/mod.rs similarity index 99% rename from ethcore/light/src/types/request.rs rename to ethcore/light/src/types/request/mod.rs index 1a1276951..383e1a06a 100644 --- a/ethcore/light/src/types/request.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -19,6 +19,8 @@ use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Stream, View}; use util::H256; +mod builder; + // re-exports of request types. pub use self::header::{ Complete as CompleteHeadersRequest, @@ -61,6 +63,8 @@ pub use self::execution::{ Response as ExecutionResponse, }; +pub use self::builder::{RequestBuilder, Requests}; + /// Error indicating a reference to a non-existent or wrongly-typed output. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; From 391eb4b66c01634ca4665366419c4296aa048225 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Mar 2017 16:55:13 +0100 Subject: [PATCH 29/89] get network tests working --- ethcore/light/src/net/mod.rs | 15 +- ethcore/light/src/net/request_credits.rs | 9 + ethcore/light/src/net/tests/mod.rs | 309 +++++++++++++---------- ethcore/light/src/types/request/mod.rs | 35 ++- 4 files changed, 224 insertions(+), 144 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 7929f7b43..402f3ac3a 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -46,8 +46,8 @@ mod error; mod status; mod request_set; -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; pub mod request_credits; @@ -660,7 +660,7 @@ impl LightProtocol { } // Receive requests from a peer. 
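 // Packet layout: [req_id, [req_1, ..., req_n]]; the answers are returned in a
 // single RESPONSE packet laid out as [req_id, updated_credits, [res_1, ..., res_k]].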
- fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { + fn request(&self, peer_id: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> { // the maximum amount of requests we'll fill in a single packet. const MAX_REQUESTS: usize = 256; @@ -668,7 +668,7 @@ impl LightProtocol { use ::request::CompleteRequest; let peers = self.peers.read(); - let peer = match peers.get(peer) { + let peer = match peers.get(peer_id) { Some(peer) => peer, None => { debug!(target: "pip", "Ignoring request from unknown peer"); @@ -680,7 +680,10 @@ impl LightProtocol { let req_id: u64 = raw.val_at(0)?; let mut request_builder = RequestBuilder::default(); + trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); + // deserialize requests, check costs and request validity. + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; peer.local_credits.deduct_cost(self.flow_params.compute_cost(&request))?; @@ -688,6 +691,8 @@ impl LightProtocol { } let requests = request_builder.build(); + let num_requests = requests.requests().len(); + trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id); // respond to all requests until one fails. let responses = requests.respond_to_all(|complete_req| { @@ -703,6 +708,8 @@ impl LightProtocol { } }); + trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + io.respond(packet::RESPONSE, { let mut stream = RlpStream::new_list(3); let cur_credits = peer.local_credits.current(); diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 4f7f8a6a3..29f0fff95 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -215,6 +215,9 @@ impl FlowParams { /// Get a reference to the cost table. pub fn cost_table(&self) -> &CostTable { &self.costs } + /// Get the base cost of a request. + pub fn base_cost(&self) -> U256 { self.costs.base } + /// Get a reference to the recharge rate. pub fn recharge_rate(&self) -> &U256 { &self.recharge } @@ -233,6 +236,12 @@ impl FlowParams { } } + /// Compute the cost of a set of requests. + /// This is the base cost plus the cost of each individual request. + pub fn compute_cost_multi(&self, requests: &[Request]) -> U256 { + requests.iter().fold(self.costs.base, |cost, req| cost + self.compute_cost(req)) + } + /// Create initial credits. pub fn create_credits(&self) -> Credits { Credits { diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 6a9de1467..bc7ab2e10 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -27,15 +27,31 @@ use network::{PeerId, NodeId}; use net::request_credits::FlowParams; use net::context::IoContext; use net::status::{Capabilities, Status, write_handshake}; -use net::{encode_request, LightProtocol, Params, packet, Peer}; +use net::{LightProtocol, Params, packet, Peer}; use provider::Provider; -use request::{self, Request, Headers}; +use request; +use request::*; use rlp::*; -use util::{Address, Bytes, DBValue, H256, U256}; +use util::{Address, H256, U256}; use std::sync::Arc; +// helper for encoding a single request into a packet. +// panics on bad backreference. 
+fn encode_single(request: Request) -> Requests { + let mut builder = RequestBuilder::default(); + builder.push(request).unwrap(); + builder.build() +} + +// helper for making a packet out of `Requests`. +fn make_packet(req_id: usize, requests: &Requests) -> Vec { + let mut stream = RlpStream::new_list(2); + stream.append(&req_id).append(&requests.requests()); + stream.out() +} + // expected result from a call. #[derive(Debug, PartialEq, Eq)] enum Expect { @@ -99,35 +115,45 @@ impl Provider for TestProvider { self.0.client.block_header(id) } - fn block_body(&self, id: BlockId) -> Option { - self.0.client.block_body(id) + fn block_body(&self, req: request::CompleteBodyRequest) -> Option { + self.0.client.block_body(req) } - fn block_receipts(&self, hash: &H256) -> Option { - self.0.client.block_receipts(&hash) + fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option { + self.0.client.block_receipts(req) } - fn state_proof(&self, req: request::StateProof) -> Vec { - match req.key2 { - Some(_) => vec![::util::sha3::SHA3_NULL_RLP.to_vec()], - None => { - // sort of a leaf node - let mut stream = RlpStream::new_list(2); - stream.append(&req.key1).append_empty_data(); - vec![stream.out()] - } - } + fn account_proof(&self, req: request::CompleteAccountRequest) -> Option { + // sort of a leaf node + let mut stream = RlpStream::new_list(2); + stream.append(&req.address_hash).append_empty_data(); + Some(AccountResponse { + proof: vec![stream.out()], + balance: 10.into(), + nonce: 100.into(), + code_hash: Default::default(), + storage_root: Default::default(), + }) } - fn contract_code(&self, req: request::ContractCode) -> Bytes { - req.account_key.iter().chain(req.account_key.iter()).cloned().collect() + fn storage_proof(&self, req: request::CompleteStorageRequest) -> Option { + Some(StorageResponse { + proof: vec![::rlp::encode(&req.key_hash).to_vec()], + value: req.key_hash | req.address_hash, + }) } - fn header_proof(&self, _req: request::HeaderProof) -> Option<(encoded::Header, Vec)> { + fn contract_code(&self, req: request::CompleteCodeRequest) -> Option { + Some(CodeResponse { + code: req.block_hash.iter().chain(req.code_hash.iter()).cloned().collect(), + }) + } + + fn header_proof(&self, _req: request::CompleteHeaderProofRequest) -> Option { None } - fn transaction_proof(&self, _req: request::TransactionProof) -> Option> { + fn transaction_proof(&self, _req: request::CompleteExecutionRequest) -> Option { None } @@ -226,14 +252,15 @@ fn credit_overflow() { } // 1000 requests is far too many for the default flow params. 
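 // With the default flow params the cumulative cost of such a batch exceeds the
 // peer's credit limit, so the expected outcome is `Expect::Punish` rather than a response.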
- let request = encode_request(&Request::Headers(Headers { - start: 1.into(), + let requests = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 1000, skip: 0, reverse: false, - }), 111); + })); + let request = make_packet(111, &requests); - proto.handle_packet(&Expect::Punish(1), &1, packet::GET_BLOCK_HEADERS, &request); + proto.handle_packet(&Expect::Punish(1), &1, packet::REQUEST, &request); } // test the basic request types -- these just make sure that requests are parsed @@ -259,33 +286,36 @@ fn get_block_headers() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = Headers { - start: 1.into(), + let request = Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(1).into(), max: 10, skip: 0, reverse: false, - }; + }); + let req_id = 111; - let request_body = encode_request(&Request::Headers(request.clone()), req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { let headers: Vec<_> = (0..10).map(|i| provider.client.block_header(BlockId::Number(i + 1)).unwrap()).collect(); assert_eq!(headers.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Headers, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); - let mut response_stream = RlpStream::new_list(3); + let response = vec![Response::Headers(HeadersResponse { + headers: headers, + })]; - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for header in headers { - response_stream.append_raw(&header.into_inner(), 1); - } + let mut stream = RlpStream::new_list(3); + stream.append(&req_id).append(&new_creds).append(&response); - response_stream.out() + stream.out() }; - let expected = Expect::Respond(packet::BLOCK_HEADERS, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_HEADERS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -308,33 +338,32 @@ fn get_block_bodies() { proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); } - let request = request::Bodies { - block_hashes: (0..10).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).collect() - }; + let mut builder = RequestBuilder::default(); + let mut bodies = Vec::new(); + for i in 0..10 { + let hash = provider.client.block_header(BlockId::Number(i)).unwrap().hash(); + builder.push(Request::Body(IncompleteBodyRequest { + hash: hash.into(), + })).unwrap(); + bodies.push(Response::Body(provider.client.block_body(CompleteBodyRequest { + hash: hash, + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Bodies(request.clone()), req_id); let response = { - let bodies: Vec<_> = (0..10).map(|i| provider.client.block_body(BlockId::Number(i + 1)).unwrap()).collect(); - assert_eq!(bodies.len(), 10); - - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Bodies, 10); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(10); - for body in bodies { - response_stream.append_raw(&body.into_inner(), 1); - } - + 
response_stream.append(&req_id).append(&new_creds).append(&bodies); response_stream.out() }; - let expected = Expect::Respond(packet::BLOCK_BODIES, response); - proto.handle_packet(&expected, &1, packet::GET_BLOCK_BODIES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -359,36 +388,37 @@ fn get_block_receipts() { // find the first 10 block hashes starting with `f` because receipts are only provided // by the test client in that case. - let block_hashes: Vec<_> = (0..1000).map(|i| - provider.client.block_header(BlockId::Number(i)).unwrap().hash() - ).filter(|hash| format!("{}", hash).starts_with("f")).take(10).collect(); + let block_hashes: Vec = (0..1000) + .map(|i| provider.client.block_header(BlockId::Number(i)).unwrap().hash()) + .filter(|hash| format!("{}", hash).starts_with("f")) + .take(10) + .collect(); - let request = request::Receipts { - block_hashes: block_hashes.clone(), - }; + let mut builder = RequestBuilder::default(); + let mut receipts = Vec::new(); + for hash in block_hashes.iter().cloned() { + builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap(); + receipts.push(Response::Receipts(provider.client.block_receipts(CompleteReceiptsRequest { + hash: hash + }).unwrap())); + } let req_id = 111; + let requests = builder.build(); + let request_body = make_packet(req_id, &requests); - let request_body = encode_request(&Request::Receipts(request.clone()), req_id); let response = { - let receipts: Vec<_> = block_hashes.iter() - .map(|hash| provider.client.block_receipts(hash).unwrap()) - .collect(); + assert_eq!(receipts.len(), 10); - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Receipts, receipts.len()); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(receipts.len()); - for block_receipts in receipts { - response_stream.append_raw(&block_receipts, 1); - } - + response_stream.append(&req_id).append(&new_creds).append(&receipts); response_stream.out() }; - let expected = Expect::Respond(packet::RECEIPTS, response); - proto.handle_packet(&expected, &1, packet::GET_RECEIPTS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -397,8 +427,9 @@ fn get_state_proofs() { let capabilities = capabilities(); let (provider, proto) = setup(flow_params.clone(), capabilities.clone()); + let provider = TestProvider(provider); - let cur_status = status(provider.client.chain_info()); + let cur_status = status(provider.0.client.chain_info()); { let packet_body = write_handshake(&cur_status, &capabilities, Some(&flow_params)); @@ -407,40 +438,45 @@ fn get_state_proofs() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::StateProofs (request::StateProofs { - requests: vec![ - request::StateProof { block: H256::default(), key1: key1, key2: None, from_level: 0 }, - request::StateProof { block: H256::default(), key1: key1, key2: Some(key2), from_level: 0}, - ] - }); + let mut builder = RequestBuilder::default(); + builder.push(Request::Account(IncompleteAccountRequest { + block_hash: 
H256::default().into(), + address_hash: key1.into(), + })).unwrap(); + builder.push(Request::Storage(IncompleteStorageRequest { + block_hash: H256::default().into(), + address_hash: key1.into(), + key_hash: key2.into(), + })).unwrap(); - let request_body = encode_request(&request, req_id); + let requests = builder.build(); + + let request_body = make_packet(req_id, &requests); let response = { - let proofs = vec![ - { let mut stream = RlpStream::new_list(2); stream.append(&key1).append_empty_data(); vec![stream.out()] }, - vec![::util::sha3::SHA3_NULL_RLP.to_vec()], + let responses = vec![ + Response::Account(provider.account_proof(CompleteAccountRequest { + block_hash: H256::default(), + address_hash: key1, + }).unwrap()), + Response::Storage(provider.storage_proof(CompleteStorageRequest { + block_hash: H256::default(), + address_hash: key1, + key_hash: key2, + }).unwrap()), ]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::StateProofs, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for proof in proofs { - response_stream.begin_list(proof.len()); - for node in proof { - response_stream.append_raw(&node, 1); - } - } - + response_stream.append(&req_id).append(&new_creds).append(&responses); response_stream.out() }; - let expected = Expect::Respond(packet::PROOFS, response); - proto.handle_packet(&expected, &1, packet::GET_PROOFS, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -459,37 +495,31 @@ fn get_contract_code() { } let req_id = 112; - let key1 = U256::from(11223344).into(); - let key2 = U256::from(99988887).into(); + let key1: H256 = U256::from(11223344).into(); + let key2: H256 = U256::from(99988887).into(); - let request = Request::Codes (request::ContractCodes { - code_requests: vec![ - request::ContractCode { block_hash: H256::default(), account_key: key1 }, - request::ContractCode { block_hash: H256::default(), account_key: key2 }, - ], + let request = Request::Code(IncompleteCodeRequest { + block_hash: key1.into(), + code_hash: key2.into(), }); - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); let response = { - let codes: Vec> = vec![ - key1.iter().chain(key1.iter()).cloned().collect(), - key2.iter().chain(key2.iter()).cloned().collect(), - ]; + let response = vec![Response::Code(CodeResponse { + code: key1.iter().chain(key2.iter()).cloned().collect(), + })]; - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::Codes, 2); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); - response_stream.append(&req_id).append(&new_creds).begin_list(2); - for code in codes { - response_stream.append(&code); - } - + response_stream.append(&req_id).append(&new_creds).append(&response); response_stream.out() }; - let expected = Expect::Respond(packet::CONTRACT_CODES, response); - proto.handle_packet(&expected, &1, packet::GET_CONTRACT_CODES, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -508,8 +538,8 @@ fn proof_of_execution() { } let req_id = 
112; - let mut request = Request::TransactionProof (request::TransactionProof { - at: H256::default(), + let mut request = Request::Execution(request::IncompleteExecutionRequest { + block_hash: H256::default().into(), from: Address::default(), action: Action::Call(Address::default()), gas: 100.into(), @@ -519,9 +549,11 @@ fn proof_of_execution() { }); // first: a valid amount to request execution of. - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let response = { - let new_creds = *flow_params.limit() - flow_params.compute_cost(request::Kind::TransactionProof, 100); + let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests()); let mut response_stream = RlpStream::new_list(3); response_stream.append(&req_id).append(&new_creds).begin_list(0); @@ -529,17 +561,19 @@ fn proof_of_execution() { response_stream.out() }; - let expected = Expect::Respond(packet::TRANSACTION_PROOF, response); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + let expected = Expect::Respond(packet::RESPONSE, response); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); // next: way too much requested gas. - if let Request::TransactionProof(ref mut req) = request { + if let Request::Execution(ref mut req) = request { req.gas = 100_000_000.into(); } let req_id = 113; - let request_body = encode_request(&request, req_id); + let requests = encode_single(request.clone()); + let request_body = make_packet(req_id, &requests); + let expected = Expect::Punish(1); - proto.handle_packet(&expected, &1, packet::GET_TRANSACTION_PROOF, &request_body); + proto.handle_packet(&expected, &1, packet::REQUEST, &request_body); } #[test] @@ -554,12 +588,13 @@ fn id_guard() { let req_id_1 = ReqId(5143); let req_id_2 = ReqId(1111); - let req = Request::Headers(request::Headers { - start: 5u64.into(), + + let req = encode_single(Request::Headers(IncompleteHeadersRequest { + start: HashOrNumber::Number(5u64).into(), max: 100, skip: 0, reverse: false, - }); + })); let peer_id = 9876; @@ -579,15 +614,15 @@ fn id_guard() { failed_requests: Vec::new(), })); - // first, supply wrong request type. + // first, malformed responses. { let mut stream = RlpStream::new_list(3); stream.append(&req_id_1.0); stream.append(&4_000_000usize); - stream.begin_list(0); + stream.begin_list(2).append(&125usize).append(&3usize); let packet = stream.out(); - assert!(proto.block_bodies(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // next, do an unexpected response. @@ -598,7 +633,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.receipts(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_err()); } // lastly, do a valid (but empty) response. 
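A condensed sketch of the round trip every test above exercises, reusing the helpers and types already in scope in this test module (`encode_single`, `make_packet`, `flow_params`, `proto`); `req_id` and `responses` stand in for whatever a concrete test builds. The request packet is the RLP list [req_id, [request, ...]] sent on the single `packet::REQUEST` id, the expected reply is [req_id, new_credits, [response, ...]] on `packet::RESPONSE`, and credits are charged per batch via `compute_cost_multi` rather than per legacy packet kind:

    let requests = encode_single(Request::Headers(IncompleteHeadersRequest {
        start: HashOrNumber::Number(1).into(),
        max: 10,
        skip: 0,
        reverse: false,
    }));
    let request_body = make_packet(req_id, &requests);

    // credits deducted for the whole batch of requests
    let new_creds = *flow_params.limit() - flow_params.compute_cost_multi(requests.requests());

    let mut response_stream = RlpStream::new_list(3);
    response_stream.append(&req_id).append(&new_creds).append(&responses);

    let expected = Expect::Respond(packet::RESPONSE, response_stream.out());
    proto.handle_packet(&expected, &1, packet::REQUEST, &request_body);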
@@ -609,7 +644,7 @@ fn id_guard() { stream.begin_list(0); let packet = stream.out(); - assert!(proto.block_headers(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); + assert!(proto.response(&peer_id, &Expect::Nothing, UntrustedRlp::new(&packet)).is_ok()); } let peers = proto.peers.read(); diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 383e1a06a..58a6ac717 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -260,7 +260,10 @@ impl Decodable for Request { impl Encodable for Request { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).append(&self.kind()); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); match *self { Request::Headers(ref req) => s.append(req), @@ -441,7 +444,10 @@ impl Decodable for Response { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).append(&self.kind()); + s.begin_list(2); + + // hack around https://github.com/ethcore/parity/issues/4356 + Encodable::rlp_append(&self.kind(), s); match *self { Response::Headers(ref res) => s.append(res), @@ -916,7 +922,7 @@ pub mod block_body { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.append_raw(&self.body.rlp().as_raw(), 2); + s.append_raw(&self.body.rlp().as_raw(), 1); } } } @@ -1684,4 +1690,27 @@ mod tests { check_roundtrip(res); check_roundtrip(full_res); } + + #[test] + fn vec_test() { + use rlp::*; + + let reqs: Vec<_> = (0..10).map(|_| IncompleteExecutionRequest { + block_hash: Field::Scalar(Default::default()), + from: Default::default(), + action: ::ethcore::transaction::Action::Create, + gas: 100_000.into(), + gas_price: 0.into(), + value: 100_000_001.into(), + data: vec![1, 2, 3, 2, 1], + }).map(Request::Execution).collect(); + + let mut stream = RlpStream::new_list(2); + stream.append(&100usize).append(&reqs); + let out = stream.out(); + + let rlp = UntrustedRlp::new(&out); + assert_eq!(rlp.val_at::(0).unwrap(), 100usize); + assert_eq!(rlp.val_at::>(1).unwrap(), reqs); + } } From 64342d200c76634e65e461df1ed22ab5da418955 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 9 Mar 2017 17:28:49 +0100 Subject: [PATCH 30/89] return only complete headers responses --- ethcore/light/src/provider.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 36653fe4d..7854330e4 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -62,7 +62,7 @@ pub trait Provider: Send + Sync { HashOrNumber::Number(start_num) => start_num, HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) { None => { - trace!(target: "les_provider", "Unknown block hash {} requested", hash); + trace!(target: "pip_provider", "Unknown block hash {} requested", hash); return None; } Some(header) => { @@ -91,7 +91,11 @@ pub trait Provider: Send + Sync { .flat_map(|x| x) .collect(); - Some(::request::HeadersResponse { headers: headers }) + if headers.is_empty() { + None + } else { + Some(::request::HeadersResponse { headers: headers }) + } } /// Get a block header by id. 
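The provider's headers handler resolves `start` to a block number, enumerates candidate numbers from `max`, `skip`, and `reverse`, fetches the corresponding headers, and with this change returns `None` instead of an empty response when nothing could be served. A small self-contained sketch of just the number enumeration (the header fetch and emptiness check are elided); the function name and the assertions are illustrative only:

    fn header_numbers(start_num: u64, best_num: u64, max: u64, skip: u64, reverse: bool) -> Vec<u64> {
        (0u64..max)
            // distance of each candidate from the start, honouring `skip`
            .map(|x| x.saturating_mul(skip + 1))
            // stop before walking below block 1 (reverse) or past the chain head (forward)
            .take_while(|x| if reverse { *x < start_num } else { best_num.saturating_sub(start_num) >= *x })
            .map(|x| if reverse { start_num - x } else { start_num + x })
            .collect()
    }

    // start=10, max=4, skip=1, forward: every second block starting at 10.
    assert_eq!(header_numbers(10, 100, 4, 1, false), vec![10, 12, 14, 16]);
    // start=5, reverse, no skip: counts down and stops before reaching block 0.
    assert_eq!(header_numbers(5, 100, 10, 0, true), vec![5, 4, 3, 2, 1]);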
@@ -182,7 +186,7 @@ impl Provider for T { let cht_number = match cht::block_to_cht_number(req.num) { Some(cht_num) => cht_num, None => { - debug!(target: "les_provider", "Requested CHT proof with invalid block number"); + debug!(target: "pip_provider", "Requested CHT proof with invalid block number"); return None; } }; @@ -230,7 +234,7 @@ impl Provider for T { }), Ok(None) => None, Err(e) => { - debug!(target: "les_provider", "Error looking up number in freshly-created CHT: {}", e); + debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e); None } } From f0a587d31081bb8d1098d8b2f8bb4f9bde060ab5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 13 Mar 2017 13:36:03 +0100 Subject: [PATCH 31/89] request builder improvements --- ethcore/light/src/provider.rs | 2 +- ethcore/light/src/types/request/builder.rs | 87 +++++++++++++--------- ethcore/light/src/types/request/mod.rs | 9 +++ 3 files changed, 63 insertions(+), 35 deletions(-) diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 7854330e4..aa8869e20 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -82,7 +82,7 @@ pub trait Provider: Send + Sync { } }; - let headers = (0u64..req.max as u64) + let headers: Vec<_> = (0u64..req.max as u64) .map(|x: u64| x.saturating_mul(req.skip + 1)) .take_while(|x| if req.reverse { x < &start_num } else { best_num.saturating_sub(start_num) >= *x }) .map(|x| if req.reverse { start_num - x } else { start_num + x }) diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 867bb6dcc..cdd3a086f 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -21,7 +21,7 @@ use std::collections::HashMap; use request::{ IncompleteRequest, CompleteRequest, Request, - OutputKind, Output, NoSuchOutput, Response, + OutputKind, Output, NoSuchOutput, Response, ResponseError, }; /// Build chained requests. Push them onto the series with `push`, @@ -34,7 +34,7 @@ pub struct RequestBuilder { impl RequestBuilder { /// Attempt to push a request onto the request chain. Fails if the request - /// references a non-existant output of a prior request. + /// references a non-existent output of a prior request. pub fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> { request.check_outputs(|req, idx, kind| { match self.output_kinds.get(&(req, idx)) { @@ -48,13 +48,17 @@ impl RequestBuilder { Ok(()) } + /// Get a reference to the output kinds map. + pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> { + &self.output_kinds + } + /// Convert this into a "requests" object. pub fn build(self) -> Requests { Requests { - output_kinds: self.output_kinds, outputs: HashMap::new(), requests: self.requests, - offset: 0, + answered: 0, } } } @@ -62,49 +66,27 @@ impl RequestBuilder { /// Requests pending responses. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Requests { - output_kinds: HashMap<(usize, usize), OutputKind>, outputs: HashMap<(usize, usize), Output>, requests: Vec, - offset: usize, // offset for splitting. + answered: usize, } impl Requests { /// For each request, produce responses for each. /// The responses vector produced goes up to the point where the responder /// first returns `None`, an invalid response, or until all requests have been responded to. 
- pub fn respond_to_all(self, responder: F) -> Vec + pub fn respond_to_all(mut self, responder: F) -> Vec where F: Fn(CompleteRequest) -> Option { let mut responses = Vec::new(); - let mut found_bad = false; - let offset = self.offset; - let output_kinds = self.output_kinds; - let mut outputs = self.outputs; - for (idx, req) in self.requests.into_iter().enumerate().map(|(idx, req)| (idx + offset, req)) { - let complete = req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) - .expect("All outputs checked as invariant of `Requests` object; qed"); - match responder(complete) { - Some(response) => { - response.fill_outputs(|out_idx, output| { - match output_kinds.get(&(idx, out_idx)) { - None => {}, - Some(out) => if out == &output.kind() { - outputs.insert((idx, out_idx), output); - } else { - // output kind doesn't match expected. - found_bad = true; - } - } - }); - - if found_bad { - return responses; - } - - responses.push(response); + while let Some(response) = self.next_complete().and_then(&responder) { + match self.supply_response(&response) { + Ok(()) => responses.push(response), + Err(e) => { + debug!(target: "pip", "produced bad response to request: {:?}", e); + return responses; } - None => return responses, } } @@ -112,7 +94,44 @@ impl Requests { } /// Get access to the underlying slice of requests. + // TODO: unimplemented -> Vec, // do we _have to_ allocate? pub fn requests(&self) -> &[Request] { &self.requests } + + /// Get the number of answered requests. + pub fn num_answered(&self) -> usize { self.answered } + + /// Get the next request as a filled request. Returns `None` when all requests answered. + pub fn next_complete(&self) -> Option { + if self.answered == self.requests.len() { + None + } else { + let outputs = &self.outputs; + Some(self.requests[self.answered].clone() + .fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .expect("All outputs checked as invariant of `Requests` object; qed")) + } + } + + /// Supply a response for the next request. + /// Fails on: wrong request kind, all requests answered already. + pub fn supply_response(&mut self, response: &Response) -> Result<(), ResponseError> { + let idx = self.answered; + + // check validity. + if idx == self.requests.len() { return Err(ResponseError::Unexpected) } + if self.requests[idx].kind() != response.kind() { return Err(ResponseError::WrongKind) } + + let outputs = &mut self.outputs; + response.fill_outputs(|out_idx, output| { + // we don't need to check output kinds here because all back-references + // are validated in the builder. + // TODO: optimization for only storing outputs we "care about"? + outputs.insert((idx, out_idx), output); + }); + + self.answered += 1; + Ok(()) + } } #[cfg(test)] diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 58a6ac717..165dff742 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -69,6 +69,15 @@ pub use self::builder::{RequestBuilder, Requests}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct NoSuchOutput; +/// Error on processing a response. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ResponseError { + /// Wrong kind of response. + WrongKind, + /// No responses expected. + Unexpected, +} + /// An input to a request. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Field { From 8bf5be0cc48ab12e0f9d9e2e924fcfda27aafe6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 13 Mar 2017 15:49:52 +0100 Subject: [PATCH 32/89] New version of jsonrpc. --- Cargo.lock | 132 ++++++++++----------- dapps/Cargo.toml | 32 ++--- dapps/src/api/api.rs | 49 ++++---- dapps/src/lib.rs | 90 +++++++------- dapps/src/router/host_validation.rs | 15 +-- dapps/src/router/mod.rs | 30 ++--- dapps/src/rpc.rs | 45 ++++--- dapps/src/tests/api.rs | 46 +++---- dapps/src/tests/helpers/mod.rs | 38 +++--- dapps/src/tests/redirection.rs | 22 ++-- ipfs/src/lib.rs | 76 +++++------- parity/dapps.rs | 27 ++--- parity/ipfs.rs | 69 +++++++---- parity/rpc.rs | 27 ++--- parity/run.rs | 3 - parity/signer.rs | 17 +-- rpc/Cargo.toml | 13 +- rpc/src/lib.rs | 102 ++++++++-------- rpc/src/v1/tests/mocked/eth.rs | 6 +- rpc/src/v1/tests/mocked/parity.rs | 8 +- rpc/src/v1/tests/mocked/parity_accounts.rs | 2 +- signer/Cargo.toml | 1 + signer/src/lib.rs | 11 +- signer/src/tests/mod.rs | 9 +- signer/src/ws_server/mod.rs | 26 ++-- signer/src/ws_server/session.rs | 21 ++-- stratum/src/lib.rs | 28 +++-- util/reactor/src/lib.rs | 4 +- 28 files changed, 469 insertions(+), 480 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b54bd27db..3877467a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ dependencies = [ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -447,8 +447,9 @@ dependencies = [ "fetch 0.1.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -621,10 +622,10 @@ dependencies = [ "ethsync 1.7.0", "fetch 0.1.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -667,7 +668,8 @@ dependencies = [ "ethcore-io 1.7.0", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-ui 1.7.0", @@ -687,9 +689,9 @@ dependencies = [ "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", @@ -1083,38 +1085,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-http-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-ipc-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - 
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1124,26 +1125,44 @@ dependencies = [ [[package]] name = "jsonrpc-macros" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "jsonrpc-tcp-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" +name = "jsonrpc-pubsub" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" dependencies = [ - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jsonrpc-server-utils" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jsonrpc-tcp-server" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#b36a57c4bf449c431dc59f0e88236026eda62ea7" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1640,7 +1659,7 @@ dependencies = [ "ethcore 1.7.0", "ethcore-util 1.7.0", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1677,7 +1696,7 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-util 1.7.0", "futures 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2198,11 +2217,6 @@ name = "smallvec" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "smallvec" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "smallvec" version = "0.3.1" @@ -2273,11 +2287,6 @@ dependencies = [ name = "table" version = "0.1.0" -[[package]] -name = "take" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "target_info" version = "0.1.0" @@ -2352,22 +2361,6 @@ dependencies = [ "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "tokio-proto" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "tokio-service" version = "0.1.0" @@ -2626,11 +2619,13 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" @@ -2737,7 +2732,6 @@ dependencies = [ "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410" -"checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" "checksum smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3c84984c278afe61a46e19868e8b23e2ee3be5b3cc6dea6edad4893bc6c841" "checksum solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "172382bac9424588d7840732b250faeeef88942e37b6e35317dce98cafdd75b2" "checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf" @@ -2746,7 +2740,6 @@ dependencies = [ "checksum syn 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f94368aae82bb29656c98443a7026ca931a659e8d19dcdc41d6e273054e820" "checksum syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "393b6dd0889df2b064beeea954cfda6bc2571604ac460deeae0fed55a53988af" "checksum syntex_syntax 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44bded3cabafc65c90b663b1071bd2d198a9ab7515e6ce729e4570aaf53c407e" -"checksum take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" "checksum term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "f2077e54d38055cf1ca0fd7933a2e00cd3ec8f6fed352b2a377f06dcdaaf3281" @@ -2756,7 +2749,6 @@ dependencies = [ "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" "checksum tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52416b3e937abac22a543a7f1c66bd37feb60137ff1ab42390fa02df85347e58" -"checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 508fbc1a0..57fcf21de 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -8,33 +8,37 @@ authors = ["Parity Technologies "] [lib] [dependencies] -rand = "0.3" -log = "0.3" +base32 = "0.3" env_logger = "0.3" futures = "0.1" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } -unicase = "1.3" -url = "1.0" -rustc-serialize = "0.3" -serde = "0.9" -serde_json = "0.9" -serde_derive = "0.9" linked-hash-map = "0.3" -parity-dapps-glue = "1.4" -base32 = "0.3" +log = "0.3" mime = "0.2" mime_guess = "1.6.1" +rand = "0.3" +rustc-serialize = "0.3" +serde = "0.9" +serde_derive = "0.9" +serde_json = "0.9" time = "0.1.35" +unicase = "1.3" +url = "1.0" zip = { version = "0.1", default-features = false } + +hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } +jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } +# TODO [ToDr] Temporary solution, server should be merged with RPC. +jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git" } + ethcore-devtools = { path = "../devtools" } ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } fetch = { path = "../util/fetch" } -parity-ui = { path = "./ui" } +parity-dapps-glue = "1.4" parity-hash-fetch = { path = "../hash-fetch" } parity-reactor = { path = "../util/reactor" } +parity-ui = { path = "./ui" } clippy = { version = "0.0.103", optional = true} diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 9106e0d70..ce8f495e6 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -19,7 +19,6 @@ use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::header; use hyper::method::Method; -use hyper::header::AccessControlAllowOrigin; use api::types::{App, ApiError}; use api::response; @@ -27,23 +26,20 @@ use apps::fetcher::Fetcher; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server::cors; +use jsonrpc_http_server; +use jsonrpc_server_utils::cors; #[derive(Clone)] pub struct RestApi { - cors_domains: Option>, + cors_domains: Option>, endpoints: Arc, fetcher: Arc, } impl RestApi { - pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { + pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { Box::new(RestApi { - cors_domains: Some(cors_domains.into_iter().map(|domain| match domain.as_ref() { - "all" | "*" | "any" => AccessControlAllowOrigin::Any, - "null" => AccessControlAllowOrigin::Null, - other => AccessControlAllowOrigin::Value(other.into()), - }).collect()), + cors_domains: Some(cors_domains), endpoints: endpoints, fetcher: fetcher, }) @@ -64,7 +60,7 @@ impl Endpoint for RestApi { struct RestApiRouter { api: RestApi, - origin: Option, + cors_header: Option, path: Option, control: Option, handler: Box, @@ -74,7 +70,7 @@ impl RestApiRouter { fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), - origin: None, + cors_header: None, control: Some(control), api: api, handler: response::as_json_error(&ApiError { @@ -95,21 +91,22 @@ impl RestApiRouter { } /// Returns basic headers for a response 
(it may be overwritten by the handler) - fn response_headers(&self) -> header::Headers { + fn response_headers(cors_header: Option) -> header::Headers { let mut headers = header::Headers::new(); - headers.set(header::AccessControlAllowCredentials); - headers.set(header::AccessControlAllowMethods(vec![ - Method::Options, - Method::Post, - Method::Get, - ])); - headers.set(header::AccessControlAllowHeaders(vec![ - UniCase("origin".to_owned()), - UniCase("content-type".to_owned()), - UniCase("accept".to_owned()), - ])); - if let Some(cors_header) = cors::get_cors_header(&self.api.cors_domains, &self.origin) { + if let Some(cors_header) = cors_header { + headers.set(header::AccessControlAllowCredentials); + headers.set(header::AccessControlAllowMethods(vec![ + Method::Options, + Method::Post, + Method::Get, + ])); + headers.set(header::AccessControlAllowHeaders(vec![ + UniCase("origin".to_owned()), + UniCase("content-type".to_owned()), + UniCase("accept".to_owned()), + ])); + headers.set(cors_header); } @@ -120,7 +117,7 @@ impl RestApiRouter { impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { - self.origin = cors::read_origin(&request); + self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains); if let Method::Options = *request.method() { self.handler = response::empty(); @@ -164,7 +161,7 @@ impl server::Handler for RestApiRouter { } fn on_response(&mut self, res: &mut server::Response) -> Next { - *res.headers_mut() = self.response_headers(); + *res.headers_mut() = Self::response_headers(self.cors_header.take()); self.handler.on_response(res) } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 30c62a031..eca6fd991 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -20,25 +20,28 @@ #![cfg_attr(feature="nightly", plugin(clippy))] extern crate base32; +extern crate futures; extern crate hyper; -extern crate time; -extern crate url as url_lib; -extern crate unicase; +extern crate linked_hash_map; +extern crate mime_guess; +extern crate rand; +extern crate rustc_serialize; extern crate serde; extern crate serde_json; +extern crate time; +extern crate unicase; +extern crate url as url_lib; extern crate zip; -extern crate rand; + extern crate jsonrpc_core; extern crate jsonrpc_http_server; -extern crate mime_guess; -extern crate rustc_serialize; +extern crate jsonrpc_server_utils; + extern crate ethcore_rpc; extern crate ethcore_util as util; -extern crate parity_hash_fetch as hash_fetch; -extern crate linked_hash_map; extern crate fetch; extern crate parity_dapps_glue as parity_dapps; -extern crate futures; +extern crate parity_hash_fetch as hash_fetch; extern crate parity_reactor; #[macro_use] @@ -68,17 +71,19 @@ mod web; mod tests; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use std::net::SocketAddr; use std::collections::HashMap; -use ethcore_rpc::{Metadata}; +use jsonrpc_core::{Middleware, MetaIoHandler}; +use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; +pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; + +use ethcore_rpc::Metadata; use fetch::{Fetch, Client as FetchClient}; use hash_fetch::urlhint::ContractClient; -use jsonrpc_core::Middleware; -use jsonrpc_core::reactor::RpcHandler; -use router::auth::{Authorization, NoAuth, HttpBasicAuth}; use parity_reactor::Remote; +use router::auth::{Authorization, NoAuth, HttpBasicAuth}; use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; @@ -110,8 +115,8 @@ pub struct ServerBuilder 
{ sync_status: Arc, web_proxy_tokens: Arc, signer_address: Option<(String, u16)>, - allowed_hosts: Option>, - extra_cors: Option>, + allowed_hosts: Option>, + extra_cors: Option>, remote: Remote, fetch: Option, } @@ -172,15 +177,15 @@ impl ServerBuilder { /// Change allowed hosts. /// `None` - All hosts are allowed /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: Option>) -> Self { - self.allowed_hosts = allowed_hosts; + pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { + self.allowed_hosts = allowed_hosts.into(); self } /// Extra cors headers. /// `None` - no additional CORS URLs - pub fn extra_cors_headers(mut self, cors: Option>) -> Self { - self.extra_cors = cors; + pub fn extra_cors_headers(mut self, cors: DomainsValidation) -> Self { + self.extra_cors = cors.into(); self } @@ -192,7 +197,7 @@ impl ServerBuilder { /// Asynchronously start server with no authentication, /// returns result with `Server` handle on success or an error. - pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: RpcHandler) -> Result { + pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { let fetch = self.fetch_client()?; Server::start_http( addr, @@ -207,13 +212,14 @@ impl ServerBuilder { self.sync_status, self.web_proxy_tokens, self.remote, + tokio_remote, fetch, ) } /// Asynchronously start server with `HTTP Basic Authentication`, /// return result with `Server` handle on success or an error. - pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: RpcHandler) -> Result { + pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { let fetch = self.fetch_client()?; Server::start_http( addr, @@ -228,6 +234,7 @@ impl ServerBuilder { self.sync_status, self.web_proxy_tokens, self.remote, + tokio_remote, fetch, ) } @@ -243,12 +250,11 @@ impl ServerBuilder { /// Webapps HTTP server. pub struct Server { server: Option, - panic_handler: Arc () + Send>>>>, } impl Server { /// Returns a list of allowed hosts or `None` if all hosts are allowed. - fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { + fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { let mut allowed = Vec::new(); match hosts { @@ -263,16 +269,19 @@ impl Server { } /// Returns a list of CORS domains for API endpoint. 
- fn cors_domains(signer_address: Option<(String, u16)>, extra_cors: Option>) -> Vec { + fn cors_domains( + signer_address: Option<(String, u16)>, + extra_cors: Option>, + ) -> Vec { let basic_cors = match signer_address { - Some(signer_address) => vec![ + Some(signer_address) => [ format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), format!("http://{}", address(&signer_address)), format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), format!("https://{}", address(&signer_address)), - ], + ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), None => vec![], }; @@ -284,10 +293,10 @@ impl Server { fn start_http>( addr: &SocketAddr, - hosts: Option>, - extra_cors: Option>, + hosts: Option>, + extra_cors: Option>, authorization: A, - handler: RpcHandler, + handler: MetaIoHandler, dapps_path: PathBuf, extra_dapps: Vec, signer_address: Option<(String, u16)>, @@ -295,9 +304,9 @@ impl Server { sync_status: Arc, web_proxy_tokens: Arc, remote: Remote, + tokio_remote: TokioRemote, fetch: F, ) -> Result { - let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), @@ -318,7 +327,7 @@ impl Server { let special = Arc::new({ let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, cors_domains.clone(), panic_handler.clone())); + special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone())); special.insert(router::SpecialEndpoint::Utils, apps::utils()); special.insert( router::SpecialEndpoint::Api, @@ -346,17 +355,11 @@ impl Server { Server { server: Some(l), - panic_handler: panic_handler, } }) .map_err(ServerError::from) } - /// Set callback for panics. - pub fn set_panic_handler(&self, handler: F) where F : Fn() -> () + Send + 'static { - *self.panic_handler.lock().unwrap() = Some(Box::new(handler)); - } - #[cfg(test)] /// Returns address that this server is bound to. 
pub fn addr(&self) -> &SocketAddr { @@ -408,6 +411,7 @@ fn address(address: &(String, u16)) -> String { #[cfg(test)] mod util_tests { use super::Server; + use jsonrpc_http_server::AccessControlAllowOrigin; #[test] fn should_return_allowed_hosts() { @@ -432,18 +436,18 @@ mod util_tests { // when let none = Server::cors_domains(None, None); let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); - let extra = Server::cors_domains(None, Some(vec!["all".to_owned()])); + let extra = Server::cors_domains(None, Some(vec!["all".into()])); // then - assert_eq!(none, Vec::::new()); + assert_eq!(none, Vec::::new()); assert_eq!(some, vec![ - "http://parity.web3.site".to_owned(), + "http://parity.web3.site".into(), "http://parity.web3.site:18180".into(), "http://127.0.0.1:18180".into(), "https://parity.web3.site".into(), "https://parity.web3.site:18180".into(), - "https://127.0.0.1:18180".into() + "https://127.0.0.1:18180".into(), ]); - assert_eq!(extra, vec!["all".to_owned()]); + assert_eq!(extra, vec![AccessControlAllowOrigin::Any]); } } diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs index 9f40c177e..e5fcedd94 100644 --- a/dapps/src/router/host_validation.rs +++ b/dapps/src/router/host_validation.rs @@ -19,18 +19,13 @@ use apps::DAPPS_DOMAIN; use hyper::{server, header, StatusCode}; use hyper::net::HttpStream; -use jsonrpc_http_server::{is_host_header_valid}; use handlers::ContentHandler; +use jsonrpc_http_server; +use jsonrpc_server_utils::hosts; -pub fn is_valid(request: &server::Request, allowed_hosts: &[String], endpoints: Vec) -> bool { - let mut endpoints = endpoints.iter() - .map(|endpoint| format!("{}{}", endpoint, DAPPS_DOMAIN)) - .collect::>(); - endpoints.extend_from_slice(allowed_hosts); - - let header_valid = is_host_header_valid(request, &endpoints); - - match (header_valid, request.headers().get::()) { +pub fn is_valid(req: &server::Request, allowed_hosts: &Option>) -> bool { + let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts); + match (header_valid, req.headers().get::()) { (true, _) => true, (_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN), _ => false, diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index f34151552..0b4e632a6 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -24,14 +24,16 @@ use address; use std::cmp; use std::sync::Arc; use std::collections::HashMap; + use url::{Url, Host}; use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; use hyper::net::HttpStream; +use jsonrpc_server_utils::hosts; + use apps::{self, DAPPS_DOMAIN}; use apps::fetcher::Fetcher; use endpoint::{Endpoint, Endpoints, EndpointPath}; use handlers::{self, Redirection, ContentHandler}; -use self::auth::{Authorization, Authorized}; /// Special endpoints are accessible on every domain (every dapp) #[derive(Debug, PartialEq, Hash, Eq)] @@ -42,18 +44,18 @@ pub enum SpecialEndpoint { None, } -pub struct Router { +pub struct Router { control: Option, signer_address: Option<(String, u16)>, endpoints: Arc, fetch: Arc, special: Arc>>, authorization: Arc, - allowed_hosts: Option>, + allowed_hosts: Option>, handler: Box + Send>, } -impl server::Handler for Router { +impl server::Handler for Router { fn on_request(&mut self, req: server::Request) -> Next { // Choose proper handler depending on path / domain @@ -66,20 +68,18 @@ impl server::Handler for Router { trace!(target: "dapps", "Routing request to {:?}. 
Details: {:?}", url, req); // Validate Host header - if let Some(ref hosts) = self.allowed_hosts { - trace!(target: "dapps", "Validating host headers against: {:?}", hosts); - let is_valid = is_utils || host_validation::is_valid(&req, hosts, self.endpoints.keys().cloned().collect()); - if !is_valid { - debug!(target: "dapps", "Rejecting invalid host header."); - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); - } + trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts); + let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts); + if !is_valid { + debug!(target: "dapps", "Rejecting invalid host header."); + self.handler = host_validation::host_invalid_response(); + return self.handler.on_request(req); } trace!(target: "dapps", "Checking authorization."); // Check authorization let auth = self.authorization.is_authorized(&req); - if let Authorized::No(handler) = auth { + if let auth::Authorized::No(handler) = auth { debug!(target: "dapps", "Authorization denied."); self.handler = handler; return self.handler.on_request(req); @@ -181,7 +181,7 @@ impl server::Handler for Router { } } -impl Router { +impl Router { pub fn new( control: Control, signer_address: Option<(String, u16)>, @@ -189,7 +189,7 @@ impl Router { endpoints: Arc, special: Arc>>, authorization: Arc, - allowed_hosts: Option>, + allowed_hosts: Option>, ) -> Self { let handler = special.get(&SpecialEndpoint::Utils) diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index cc6f4d81a..0c95051e4 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -14,46 +14,57 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use hyper; use ethcore_rpc::{Metadata, Origin}; -use jsonrpc_core::Middleware; -use jsonrpc_core::reactor::RpcHandler; -use jsonrpc_http_server::{Rpc, ServerHandler, PanicHandler, AccessControlAllowOrigin, HttpMetaExtractor}; +use jsonrpc_core::{Middleware, MetaIoHandler}; +use jsonrpc_http_server::{self as http, AccessControlAllowOrigin, HttpMetaExtractor}; +use jsonrpc_http_server::tokio_core::reactor::Remote; use endpoint::{Endpoint, EndpointPath, Handler}; pub fn rpc>( - handler: RpcHandler, - cors_domains: Vec, - panic_handler: Arc () + Send>>>>, + handler: MetaIoHandler, + remote: Remote, + cors_domains: Vec, ) -> Box { Box::new(RpcEndpoint { - handler: handler, + handler: Arc::new(handler), + remote: remote, meta_extractor: Arc::new(MetadataExtractor), - panic_handler: panic_handler, - cors_domain: Some(cors_domains.into_iter().map(AccessControlAllowOrigin::Value).collect()), + cors_domain: Some(cors_domains), // NOTE [ToDr] We don't need to do any hosts validation here. It's already done in router. 
allowed_hosts: None, }) } struct RpcEndpoint> { - handler: RpcHandler, + handler: Arc>, + remote: Remote, meta_extractor: Arc>, - panic_handler: Arc () + Send>>>>, cors_domain: Option>, - allowed_hosts: Option>, + allowed_hosts: Option>, +} + +#[derive(Default)] +struct NoopMiddleware; +impl http::RequestMiddleware for NoopMiddleware { + fn on_request(&self, _request: &hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed + } } impl> Endpoint for RpcEndpoint { fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { - let panic_handler = PanicHandler { handler: self.panic_handler.clone() }; - Box::new(ServerHandler::new( - Rpc::new(self.handler.clone(), self.meta_extractor.clone()), + Box::new(http::ServerHandler::new( + http::Rpc { + handler: self.handler.clone(), + remote: self.remote.clone(), + extractor: self.meta_extractor.clone(), + }, self.cors_domain.clone(), self.allowed_hosts.clone(), - panic_handler, + Arc::new(NoopMiddleware), control, )) } diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 1b9f64b7f..73467e854 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -33,8 +33,8 @@ fn should_return_error() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 404 Not Found"); + response.assert_header("Content-Type", "application/json"); assert_eq!(response.body, format!("58\n{}\n0\n\n", r#"{"code":"404","title":"Not Found","detail":"Resource you requested has not been found."}"#)); assert_security_headers(&response.headers); } @@ -56,8 +56,8 @@ fn should_serve_apps() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", "application/json"); assert!(response.body.contains("Parity UI"), response.body); assert_security_headers(&response.headers); } @@ -79,8 +79,8 @@ fn should_handle_ping() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.headers.get(3).unwrap(), "Content-Type: application/json"); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Content-Type", "application/json"); assert_eq!(response.body, "0\n\n".to_owned()); assert_security_headers(&response.headers); } @@ -102,7 +102,7 @@ fn should_try_to_resolve_dapp() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_eq!(registrar.calls.lock().len(), 2); assert_security_headers(&response.headers); } @@ -125,12 +125,8 @@ fn should_return_signer_port_cors_headers() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://127.0.0.1:18180"), - "CORS header for signer missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://127.0.0.1:18180"); } #[test] @@ -151,12 +147,8 @@ fn should_return_signer_port_cors_headers_for_home_parity() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://parity.web3.site"), - "CORS header for parity.web3.site missing: {:?}", - 
response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site"); } @@ -178,12 +170,8 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_https() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: https://parity.web3.site"), - "CORS header for parity.web3.site missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "https://parity.web3.site"); } #[test] @@ -204,12 +192,8 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - response.headers_raw.contains("Access-Control-Allow-Origin: http://parity.web3.site:18180"), - "CORS header for parity.web3.site missing: {:?}", - response.headers - ); + response.assert_status("HTTP/1.1 200 OK"); + response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); } #[test] diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index d1a1e9900..d1466c77c 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -21,13 +21,12 @@ use std::sync::Arc; use env_logger::LogBuilder; use ethcore_rpc::Metadata; use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::reactor::RpcEventLoop; use ServerBuilder; use Server; use fetch::Fetch; use devtools::http_client; -use parity_reactor::Remote; +use parity_reactor::{EventLoop, Remote}; mod registrar; mod fetch; @@ -48,7 +47,7 @@ fn init_logger() { pub struct ServerLoop { pub server: Server, - pub event_loop: RpcEventLoop, + pub event_loop: EventLoop, } impl Deref for ServerLoop { @@ -70,13 +69,12 @@ pub fn init_server(process: F, io: MetaIoHandler, remote: Remote // TODO [ToDr] When https://github.com/ethcore/jsonrpc/issues/26 is resolved // this additional EventLoop wouldn't be needed, we should be able to re-use remote. 
- let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(io)); + let event_loop = EventLoop::spawn(); let server = process(ServerBuilder::new( &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), handler).unwrap(); + .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); ( ServerLoop { server: server, event_loop: event_loop }, registrar, @@ -89,12 +87,12 @@ pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { let mut dapps_path = env::temp_dir(); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(MetaIoHandler::default())); - let server = ServerBuilder::new(&dapps_path, registrar, Remote::new(event_loop.remote())) + let event_loop = EventLoop::spawn(); + let io = MetaIoHandler::default(); + let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote()) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .allowed_hosts(None) - .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, handler).unwrap(); + .allowed_hosts(None.into()) + .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap(); ServerLoop { server: server, event_loop: event_loop, @@ -102,26 +100,28 @@ pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { } pub fn serve_with_rpc(io: MetaIoHandler) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None), io, Remote::new_sync()).0 + init_server(|builder| builder.allowed_hosts(None.into()), io, Remote::new_sync()).0 } pub fn serve_hosts(hosts: Option>) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(hosts), Default::default(), Remote::new_sync()).0 + let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect()); + init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } pub fn serve_extra_cors(extra_cors: Option>) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None).extra_cors_headers(extra_cors), Default::default(), Remote::new_sync()).0 + let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect()); + init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0 } pub fn serve_with_registrar() -> (ServerLoop, Arc) { - init_server(|builder| builder.allowed_hosts(None), Default::default(), Remote::new_sync()) + init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) } pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { init_server(|builder| { builder .sync_status(Arc::new(|| true)) - .allowed_hosts(None) + .allowed_hosts(None.into()) }, Default::default(), Remote::new_sync()) } @@ -133,7 +133,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { - builder.allowed_hosts(None).fetch(f.clone()) + builder.allowed_hosts(None.into()).fetch(f.clone()) }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); (server, fetch, reg) @@ -144,7 +144,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { let f = fetch.clone(); let (server, _) = init_server(move |builder| 
{ builder - .allowed_hosts(None) + .allowed_hosts(None.into()) .fetch(f.clone()) .web_proxy_tokens(Arc::new(move |token| &token == web_token)) }, Default::default(), Remote::new_sync()); @@ -153,7 +153,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { } pub fn serve() -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None), Default::default(), Remote::new_sync()).0 + init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 } pub fn request(server: ServerLoop, request: &str) -> http_client::Response { diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 8b529a851..4e3fff4dc 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -32,7 +32,7 @@ fn should_redirect_to_home() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -52,7 +52,7 @@ fn should_redirect_to_home_when_trailing_slash_is_missing() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -72,7 +72,7 @@ fn should_redirect_to_home_for_users_with_cached_redirection() { ); // then - assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + response.assert_status("HTTP/1.1 302 Found"); assert_eq!(response.headers.get(0).unwrap(), "Location: http://127.0.0.1:18180"); } @@ -92,7 +92,7 @@ fn should_display_404_on_invalid_dapp() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_security_headers_for_embed(&response.headers); } @@ -112,7 +112,7 @@ fn should_display_404_on_invalid_dapp_with_domain() { ); // then - assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + response.assert_status("HTTP/1.1 404 Not Found"); assert_security_headers_for_embed(&response.headers); } @@ -134,8 +134,8 @@ fn should_serve_rpc() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + response.assert_status("HTTP/1.1 200 OK"); + assert_eq!(response.body, format!("4C\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"},"id":null}"#)); } #[test] @@ -156,8 +156,8 @@ fn should_serve_rpc_at_slash_rpc() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + response.assert_status("HTTP/1.1 200 OK"); + assert_eq!(response.body, format!("4C\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"},"id":null}"#)); } @@ -178,7 +178,7 @@ fn should_serve_proxy_pac() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body, "DD\n\nfunction FindProxyForURL(url, host) {\n\tif (shExpMatch(host, \"parity.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:18180\";\n\t}\n\n\tif (shExpMatch(host, \"*.web3.site\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:8080\";\n\t}\n\n\treturn \"DIRECT\";\n}\n\n0\n\n".to_owned()); 
assert_security_headers(&response.headers); } @@ -200,7 +200,7 @@ fn should_serve_utils() { ); // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body.contains("function(){"), true); assert_security_headers(&response.headers); } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 80e910f9e..287abb27c 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -33,13 +33,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use jsonrpc_http_server::cors; use hyper::server::{Listening, Handler, Request, Response}; use hyper::net::HttpStream; -use hyper::header::{Vary, ContentLength, ContentType, AccessControlAllowOrigin}; +use hyper::header::{self, Vary, ContentLength, ContentType}; use hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; +pub use jsonrpc_http_server::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler pub struct IpfsHandler { @@ -47,12 +47,12 @@ pub struct IpfsHandler { out: Out, /// How many bytes from the response have been written out_progress: usize, - /// Origin request header - origin: Option, + /// CORS response header + cors_header: Option, /// Allowed CORS domains cors_domains: Option>, /// Hostnames allowed in the `Host` request header - allowed_hosts: Option>, + allowed_hosts: Option>, /// Reference to the Blockchain Client client: Arc, } @@ -62,50 +62,27 @@ impl IpfsHandler { &*self.client } - pub fn new(cors: Option>, hosts: Option>, client: Arc) -> Self { - fn origin_to_header(origin: String) -> AccessControlAllowOrigin { - match origin.as_str() { - "*" => AccessControlAllowOrigin::Any, - "null" | "" => AccessControlAllowOrigin::Null, - _ => AccessControlAllowOrigin::Value(origin), - } - } - + pub fn new(cors: DomainsValidation, hosts: DomainsValidation, client: Arc) -> Self { IpfsHandler { out: Out::Bad("Invalid Request"), out_progress: 0, - origin: None, - cors_domains: cors.map(|vec| vec.into_iter().map(origin_to_header).collect()), - allowed_hosts: hosts, + cors_header: None, + cors_domains: cors.into(), + allowed_hosts: hosts.into(), client: client, } } - fn is_host_allowed(&self, req: &Request) -> bool { - match self.allowed_hosts { - Some(ref hosts) => jsonrpc_http_server::is_host_header_valid(&req, hosts), - None => true, + fn is_origin_allowed(&self, origin_provided: bool) -> bool { + match (origin_provided, self.cors_header.as_ref()) { + // Request without Origin are always OK. + (false, _) => true, + // If there is a cors header to be returned it's ok. + (true, Some(_)) => true, + // If origin is provided and we won't return cors header it's bad. 
+ (true, None) => false, } } - - fn is_origin_allowed(&self) -> bool { - // Check origin header first, no header passed is good news - let origin = match self.origin { - Some(ref origin) => origin, - None => return true, - }; - - let cors_domains = match self.cors_domains { - Some(ref domains) => domains, - None => return false, - }; - - cors_domains.iter().any(|domain| match *domain { - AccessControlAllowOrigin::Value(ref allowed) => origin == allowed, - AccessControlAllowOrigin::Any => true, - AccessControlAllowOrigin::Null => origin == "", - }) - } } /// Implement Hyper's HTTP handler @@ -115,15 +92,15 @@ impl Handler for IpfsHandler { return Next::write(); } - self.origin = cors::read_origin(&req); + self.cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); - if !self.is_host_allowed(&req) { + if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); return Next::write(); } - if !self.is_origin_allowed() { + if !self.is_origin_allowed(req.headers().get::().is_some()) { self.out = Out::Bad("Disallowed Origin header"); return Next::write(); @@ -176,7 +153,7 @@ impl Handler for IpfsHandler { } } - if let Some(cors_header) = cors::get_cors_header(&self.cors_domains, &self.origin) { + if let Some(cors_header) = self.cors_header.take() { res.headers_mut().set(cors_header); res.headers_mut().set(Vary::Items(vec!["Origin".into()])); } @@ -219,11 +196,11 @@ fn write_chunk(transport: &mut W, progress: &mut usize, data: &[u8]) - } /// Add current interface (default: "127.0.0.1:5001") to list of allowed hosts -fn include_current_interface(mut hosts: Vec, interface: String, port: u16) -> Vec { +fn include_current_interface(mut hosts: Vec, interface: String, port: u16) -> Vec { hosts.push(match port { 80 => interface, _ => format!("{}:{}", interface, port), - }); + }.into()); hosts } @@ -231,14 +208,15 @@ fn include_current_interface(mut hosts: Vec, interface: String, port: u1 pub fn start_server( port: u16, interface: String, - cors: Option>, - hosts: Option>, + cors: DomainsValidation, + hosts: DomainsValidation, client: Arc ) -> Result { let ip: IpAddr = interface.parse().map_err(|_| ServerError::InvalidInterface)?; let addr = SocketAddr::new(ip, port); - let hosts = hosts.map(move |hosts| include_current_interface(hosts, interface, port)); + let hosts: Option> = hosts.into(); + let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); Ok( hyper::Server::http(&addr)? 
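For clarity, the origin rule introduced above boils down to a small truth table: a request without an Origin header is always accepted, and one with an Origin header is accepted only if a CORS header has already been resolved for it. Below is a minimal standalone sketch of that rule, using a plain Option<&str> in place of hyper's header types (an assumption made purely for brevity; it is not the handler's real signature):

// Sketch of the new origin rule. `cors_header` stands in for the resolved
// Access-Control-Allow-Origin header the handler would send back.
fn is_origin_allowed(origin_provided: bool, cors_header: Option<&str>) -> bool {
    match (origin_provided, cors_header) {
        // Requests without an Origin header are always OK.
        (false, _) => true,
        // A CORS header will be returned, so the origin was matched.
        (true, Some(_)) => true,
        // Origin provided but no CORS header to return: reject.
        (true, None) => false,
    }
}

fn main() {
    assert!(is_origin_allowed(false, None));
    assert!(is_origin_allowed(true, Some("http://parity.web3.site")));
    assert!(!is_origin_allowed(true, None));
}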
diff --git a/parity/dapps.rs b/parity/dapps.rs index b9094c16d..bbd5f4960 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -23,9 +23,8 @@ use ethcore_rpc::informant::RpcStats; use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; use helpers::replace_home; -use io::PanicHandler; -use jsonrpc_core::reactor::Remote; use rpc_apis::{self, SignerService}; +use parity_reactor; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { @@ -60,11 +59,10 @@ impl Default for Configuration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, pub client: Arc, pub sync: Arc, - pub remote: Remote, + pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, pub stats: Arc, @@ -137,9 +135,9 @@ mod server { use ansi_term::Colour; use ethcore::transaction::{Transaction, Action}; use ethcore::client::{Client, BlockChainClient, BlockId}; + use ethcore_dapps::{AccessControlAllowOrigin, Host}; use ethcore_rpc::is_major_importing; use hash_fetch::urlhint::ContractClient; - use jsonrpc_core::reactor::RpcHandler; use parity_reactor; use rpc_apis; @@ -162,6 +160,8 @@ mod server { Arc::new(Registrar { client: deps.client.clone() }), parity_reactor::Remote::new(deps.remote.clone()), ); + let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); let sync = deps.sync.clone(); let client = deps.client.clone(); @@ -172,8 +172,8 @@ mod server { .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) .extra_dapps(&extra_dapps) .signer_address(deps.signer.address()) - .allowed_hosts(allowed_hosts) - .extra_cors_headers(cors); + .allowed_hosts(allowed_hosts.into()) + .extra_cors_headers(cors.into()); let api_set = if all_apis { warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Dapps with all APIs exposed.")); @@ -183,13 +183,12 @@ mod server { rpc_apis::ApiSet::UnsafeContext }; let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); - let handler = RpcHandler::new(Arc::new(apis), deps.remote); let start_result = match auth { None => { - server.start_unsecured_http(url, handler) + server.start_unsecured_http(url, apis, deps.remote) }, Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password, handler) + server.start_basic_auth_http(url, &username, &password, apis, deps.remote) }, }; @@ -199,13 +198,7 @@ mod server { _ => Err(format!("WebApps io error: {}", err)), }, Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => { - let ph = deps.panic_handler; - server.set_panic_handler(move || { - ph.notify_all("Panic in WebApp thread.".to_owned()); - }); - Ok(server) - }, + Ok(server) => Ok(server), } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index e33dcf68b..760868f91 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -1,40 +1,59 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + use std::sync::Arc; -use parity_ipfs_api; +use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; use hyper::server::Listening; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { - pub enabled: bool, - pub port: u16, - pub interface: String, - pub cors: Option>, - pub hosts: Option>, + pub enabled: bool, + pub port: u16, + pub interface: String, + pub cors: Option>, + pub hosts: Option>, } impl Default for Configuration { - fn default() -> Self { - Configuration { - enabled: false, - port: 5001, - interface: "127.0.0.1".into(), - cors: None, - hosts: Some(Vec::new()), - } - } + fn default() -> Self { + Configuration { + enabled: false, + port: 5001, + interface: "127.0.0.1".into(), + cors: None, + hosts: Some(Vec::new()), + } + } } pub fn start_server(conf: Configuration, client: Arc) -> Result, ServerError> { - if !conf.enabled { - return Ok(None); - } + if !conf.enabled { + return Ok(None); + } - parity_ipfs_api::start_server( - conf.port, - conf.interface, - conf.cors, - conf.hosts, - client - ).map(Some) + let cors = conf.cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); + let hosts = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + + parity_ipfs_api::start_server( + conf.port, + conf.interface, + cors.into(), + hosts.into(), + client + ).map(Some) } diff --git a/parity/rpc.rs b/parity/rpc.rs index 49bd94699..b0af8aa0b 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -18,19 +18,18 @@ use std::fmt; use std::sync::Arc; use std::net::SocketAddr; use std::io; -use io::PanicHandler; use dir::default_data_path; -use ethcore_rpc::{self as rpc, RpcServerError, IpcServerError, Metadata, Origin}; +use ethcore_rpc::{self as rpc, HttpServerError, IpcServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; use helpers::parity_ipc_path; use hyper; use jsonrpc_core::MetaIoHandler; -use jsonrpc_core::reactor::{RpcHandler, Remote}; use rpc_apis; use rpc_apis::ApiSet; +use parity_reactor::TokioRemote; -pub use ethcore_rpc::{IpcServer, Server as HttpServer}; +pub use ethcore_rpc::{IpcServer, HttpServer}; #[derive(Debug, PartialEq)] pub struct HttpConfiguration { @@ -84,9 +83,8 @@ impl fmt::Display for IpcConfiguration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, - pub remote: Remote, + pub remote: TokioRemote, pub stats: Arc, } @@ -123,12 +121,13 @@ pub fn setup_http_rpc_server( allowed_hosts: Option>, apis: ApiSet ) -> Result { - let apis = setup_apis(apis, dependencies); - let handler = RpcHandler::new(Arc::new(apis), dependencies.remote.clone()); - let ph = dependencies.panic_handler.clone(); - let start_result = rpc::start_http(url, cors_domains, allowed_hosts, ph, handler, RpcExtractor); + let handler = setup_apis(apis, dependencies); + let remote = dependencies.remote.clone(); + let cors_domains: Option> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); + let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, remote, RpcExtractor); match start_result { - Err(RpcServerError::IoError(err)) => match err.kind() { + Err(HttpServerError::IoError(err)) => match err.kind() { 
io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)), _ => Err(format!("RPC io error: {}", err)), }, @@ -143,9 +142,9 @@ pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result Result, String> { - let apis = setup_apis(apis, dependencies); - let handler = RpcHandler::new(Arc::new(apis), dependencies.remote.clone()); - match rpc::start_ipc(addr, handler) { + let handler = setup_apis(apis, dependencies); + let remote = dependencies.remote.clone(); + match rpc::start_ipc(addr, handler, remote) { Err(IpcServerError::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), Ok(server) => Ok(server) diff --git a/parity/run.rs b/parity/run.rs index b25ed3188..9e5c4f33a 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -434,7 +434,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R }); let dependencies = rpc::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), remote: event_loop.raw_remote(), stats: rpc_stats.clone(), @@ -446,7 +445,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // the dapps server let dapps_deps = dapps::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), client: client.clone(), sync: sync_provider.clone(), @@ -459,7 +457,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // the signer server let signer_deps = signer::Dependencies { - panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), remote: event_loop.raw_remote(), rpc_stats: rpc_stats.clone(), diff --git a/parity/signer.rs b/parity/signer.rs index 346276496..0d71604d4 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -26,8 +26,7 @@ use ethcore_rpc::informant::RpcStats; use ethcore_rpc; use ethcore_signer as signer; use helpers::replace_home; -use io::{ForwardPanic, PanicHandler}; -use jsonrpc_core::reactor::{RpcHandler, Remote}; +use parity_reactor::TokioRemote; use rpc_apis; use util::path::restrict_permissions_owner; use util::H256; @@ -57,9 +56,8 @@ impl Default for Configuration { } pub struct Dependencies { - pub panic_handler: Arc, pub apis: Arc, - pub remote: Remote, + pub remote: TokioRemote, pub rpc_stats: Arc, } @@ -143,9 +141,9 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result Result Err(format!("Trusted Signer io error: {}", err)), }, Err(e) => Err(format!("Trusted Signer Error: {:?}", e)), - Ok(server) => { - deps.panic_handler.forward_from(&server); - Ok(server) - }, + Ok(server) => Ok(server), } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 13ce8962f..e31255254 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -10,18 +10,20 @@ authors = ["Parity Technologies "] [dependencies] futures = "0.1" log = "0.3" +order-stat = "0.1" +rustc-serialize = "0.3" semver = "0.5" serde = "0.9" -serde_json = "0.9" serde_derive = "0.9" -rustc-serialize = "0.3" +serde_json = "0.9" time = "0.1" transient-hashmap = "0.1" -order-stat = "0.1" + jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-ipc-server = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git" } + ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } 
ethcore-util = { path = "../util" } @@ -35,11 +37,12 @@ ethjson = { path = "../json" } ethcore-devtools = { path = "../devtools" } ethcore-light = { path = "../ethcore/light" } parity-updater = { path = "../updater" } +parity-reactor = { path = "../util/reactor" } rlp = { path = "../util/rlp" } fetch = { path = "../util/fetch" } -parity-reactor = { path = "../util/reactor" } -clippy = { version = "0.0.103", optional = true} stats = { path = "../util/stats" } +clippy = { version = "0.0.103", optional = true} + [features] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 201f41c22..7a9ee5a22 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -19,31 +19,32 @@ #![cfg_attr(feature="nightly", feature(plugin))] #![cfg_attr(feature="nightly", plugin(clippy))] -extern crate semver; -extern crate rustc_serialize; -extern crate serde; -extern crate serde_json; -extern crate jsonrpc_core; -extern crate jsonrpc_http_server; - -extern crate ethcore_io as io; -extern crate ethcore; -extern crate ethkey; -extern crate ethcrypto as crypto; -extern crate ethstore; -extern crate ethsync; -extern crate ethash; -extern crate ethcore_light as light; -extern crate transient_hashmap; -extern crate jsonrpc_ipc_server as ipc; -extern crate ethcore_ipc; -extern crate time; -extern crate rlp; -extern crate fetch; extern crate futures; extern crate order_stat; -extern crate parity_updater as updater; +extern crate rustc_serialize; +extern crate semver; +extern crate serde; +extern crate serde_json; +extern crate time; +extern crate transient_hashmap; + +extern crate jsonrpc_core; +pub extern crate jsonrpc_http_server as http; +pub extern crate jsonrpc_ipc_server as ipc; + +extern crate ethash; +extern crate ethcore; +extern crate ethcore_io as io; +extern crate ethcore_ipc; +extern crate ethcore_light as light; +extern crate ethcrypto as crypto; +extern crate ethkey; +extern crate ethstore; +extern crate ethsync; +extern crate fetch; extern crate parity_reactor; +extern crate parity_updater as updater; +extern crate rlp; extern crate stats; #[macro_use] @@ -60,57 +61,50 @@ extern crate ethjson; #[cfg(test)] extern crate ethcore_devtools as devtools; -use std::sync::Arc; -use std::net::SocketAddr; -use io::PanicHandler; -use jsonrpc_core::reactor::RpcHandler; +pub mod v1; pub use ipc::{Server as IpcServer, Error as IpcServerError}; -pub use jsonrpc_http_server::{ServerBuilder, Server, RpcServerError, HttpMetaExtractor}; -pub mod v1; +pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; + pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; pub use v1::block_import::is_major_importing; +use std::net::SocketAddr; +use http::tokio_core; + /// Start http server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_http( +pub fn start_http( addr: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - panic_handler: Arc, - handler: RpcHandler, + cors_domains: http::DomainsValidation, + allowed_hosts: http::DomainsValidation, + handler: H, + remote: tokio_core::reactor::Remote, extractor: T, -) -> Result where +) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, + H: Into>, T: HttpMetaExtractor, { - - let cors_domains = cors_domains.map(|domains| { - domains.into_iter() - .map(|v| match v.as_str() { - "*" => jsonrpc_http_server::AccessControlAllowOrigin::Any, - "null" => jsonrpc_http_server::AccessControlAllowOrigin::Null, - v => jsonrpc_http_server::AccessControlAllowOrigin::Value(v.into()), - }) - .collect() - }); - - ServerBuilder::with_rpc_handler(handler) - .meta_extractor(Arc::new(extractor)) + http::ServerBuilder::new(handler) + .event_loop_remote(remote) + .meta_extractor(extractor) .cors(cors_domains.into()) .allowed_hosts(allowed_hosts.into()) - .panic_handler(move || { - panic_handler.notify_all("Panic in RPC thread.".to_owned()); - }) .start_http(addr) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. -pub fn start_ipc>( +pub fn start_ipc( addr: &str, - handler: RpcHandler, -) -> Result, ipc::Error> { - let server = ipc::Server::with_rpc_handler(addr, handler)?; + handler: H, + remote: tokio_core::reactor::Remote, +) -> Result, ipc::Error> where + M: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, + H: Into>, +{ + let server = ipc::Server::with_remote(addr, handler, ipc::UninitializedRemote::Shared(remote))?; server.run_async()?; Ok(server) } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 2432b55e7..f6687f818 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -875,7 +875,7 @@ fn rpc_eth_send_transaction_with_bad_to() { "id": 1 }"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid length.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid length."},"id":1}"#; assert_eq!(tester.io.handle_request_sync(&request), Some(response.into())); } @@ -1058,7 +1058,7 @@ fn rpc_get_work_returns_no_work_if_cant_mine() { eth_tester.client.set_queue_size(10); let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32001,"message":"Still syncing."},"id":1}"#; assert_eq!(eth_tester.io.handle_request_sync(request), Some(response.to_owned())); } @@ -1117,6 +1117,6 @@ fn rpc_get_work_should_timeout() { // Request with timeout of 10 seconds. This should fail. 
let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["10"], "id": 1}"#; - let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed.","data":null},"id":1}"#; + let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed."},"id":1}"#; assert_eq!(eth_tester.io.handle_request_sync(request), Some(err_response.to_owned())); } diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index a587554a3..082f7ef34 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -346,7 +346,7 @@ fn rpc_parity_unsigned_transactions_count_when_signer_disabled() { let io = deps.default_client(); let request = r#"{"jsonrpc": "2.0", "method": "parity_unsignedTransactionsCount", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available.","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } @@ -382,7 +382,7 @@ fn rpc_parity_signer_port() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_signerPort", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":18180,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); @@ -400,7 +400,7 @@ fn rpc_parity_dapps_port() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsPort", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":18080,"id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); @@ -418,7 +418,7 @@ fn rpc_parity_dapps_interface() { // when let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsInterface", "params": [], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":"127.0.0.1","id":1}"#; - let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. This API is not available.","data":null},"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","error":{"code":-32031,"message":"Dapps Server is disabled. 
This API is not available."},"id":1}"#; // then assert_eq!(io1.handle_request_sync(request), Some(response1.to_owned())); diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index 304ffd45e..72c1aef2c 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -230,7 +230,7 @@ fn should_be_able_to_kill_account() { let address = accounts[0]; let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0xf00baba2f00baba2f00baba2f00baba2f00baba2"], "id": 1}}"#); - let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid length 1, expected a tuple of size 2","data":null},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid length 1, expected a tuple of size 2"},"id":1}"#; let res = tester.io.handle_request_sync(&request); assert_eq!(res, Some(response.into())); diff --git a/signer/Cargo.toml b/signer/Cargo.toml index ba33bad68..22964104b 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -13,6 +13,7 @@ rustc_version = "0.1" [dependencies] rand = "0.3.14" jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } +jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git" } log = "0.3" env_logger = "0.3" parity-dapps-glue = { version = "1.4", optional = true } diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 5cc103ba8..d211e2eac 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -30,21 +30,23 @@ //! //! ``` //! extern crate jsonrpc_core; +//! extern crate jsonrpc_server_utils; //! extern crate ethcore_signer; //! extern crate ethcore_rpc; //! //! use std::sync::Arc; //! use jsonrpc_core::IoHandler; -//! use jsonrpc_core::reactor::RpcEventLoop; +//! use jsonrpc_server_utils::reactor::RpcEventLoop; //! use ethcore_signer::ServerBuilder; //! use ethcore_rpc::ConfirmationsQueue; //! //! fn main() { //! let queue = Arc::new(ConfirmationsQueue::default()); -//! let io = Arc::new(IoHandler::new().into()); -//! let event_loop = RpcEventLoop::spawn(); +//! let io = IoHandler::default(); +//! let event_loop = RpcEventLoop::spawn().unwrap(); +//! let remote = event_loop.remote(); //! let _server = ServerBuilder::new(queue, "/tmp/authcodes".into()) -//! .start("127.0.0.1:8084".parse().unwrap(), event_loop.handler(io)); +//! .start("127.0.0.1:8084".parse().unwrap(), io, remote); //! } //! 
``` @@ -57,6 +59,7 @@ extern crate ethcore_util as util; extern crate ethcore_rpc as rpc; extern crate ethcore_io as io; extern crate jsonrpc_core; +extern crate jsonrpc_server_utils; extern crate ws; extern crate ethcore_devtools as devtools; diff --git a/signer/src/tests/mod.rs b/signer/src/tests/mod.rs index 7de3a167a..bc90a6cd3 100644 --- a/signer/src/tests/mod.rs +++ b/signer/src/tests/mod.rs @@ -22,7 +22,7 @@ use devtools::RandomTempPath; use rpc::ConfirmationsQueue; use jsonrpc_core::IoHandler; -use jsonrpc_core::reactor::RpcEventLoop; +use jsonrpc_server_utils::reactor::RpcEventLoop; use rand; use ServerBuilder; @@ -70,9 +70,10 @@ pub fn serve() -> (ServerLoop, usize, GuardedAuthCodes) { let queue = Arc::new(ConfirmationsQueue::default()); let builder = ServerBuilder::new(queue, path.to_path_buf()); let port = 35000 + rand::random::() % 10000; - let event_loop = RpcEventLoop::spawn(); - let handler = event_loop.handler(Arc::new(IoHandler::default().into())); - let server = builder.start(format!("127.0.0.1:{}", port).parse().unwrap(), handler).unwrap(); + let event_loop = RpcEventLoop::spawn().unwrap(); + let io = IoHandler::default(); + let remote = event_loop.remote(); + let server = builder.start(format!("127.0.0.1:{}", port).parse().unwrap(), io, remote).unwrap(); let res = ServerLoop { server: server, event_loop: event_loop, diff --git a/signer/src/ws_server/mod.rs b/signer/src/ws_server/mod.rs index b799b0f66..314351938 100644 --- a/signer/src/ws_server/mod.rs +++ b/signer/src/ws_server/mod.rs @@ -26,8 +26,8 @@ use std::thread; use std; use io::{PanicHandler, OnPanicListener, MayPanic}; -use jsonrpc_core::{Metadata, Middleware}; -use jsonrpc_core::reactor::RpcHandler; +use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; +use jsonrpc_server_utils::tokio_core::reactor::Remote; use rpc::{ConfirmationsQueue}; use rpc::informant::RpcStats; @@ -92,21 +92,28 @@ impl ServerBuilder { /// Starts a new `WebSocket` server in separate thread. /// Returns a `Server` handle which closes the server when droped. - pub fn start>(self, addr: SocketAddr, handler: RpcHandler) -> Result { - self.start_with_extractor(addr, handler, NoopExtractor) + pub fn start, H: Into>>( + self, + addr: SocketAddr, + handler: H, + remote: Remote, + ) -> Result { + self.start_with_extractor(addr, handler, remote, NoopExtractor) } /// Starts a new `WebSocket` server in separate thread. /// Returns a `Server` handle which closes the server when droped. - pub fn start_with_extractor, T: session::MetaExtractor>( + pub fn start_with_extractor, H: Into>, T: session::MetaExtractor>( self, addr: SocketAddr, - handler: RpcHandler, + handler: H, + remote: Remote, meta_extractor: T, ) -> Result { Server::start( addr, - handler, + handler.into(), + remote, self.queue, self.authcodes_path, self.skip_origin_validation, @@ -136,7 +143,8 @@ impl Server { /// Returns a `Server` handle which closes the server when droped. 
fn start, T: session::MetaExtractor>( addr: SocketAddr, - handler: RpcHandler, + handler: MetaIoHandler, + remote: Remote, queue: Arc, authcodes_path: PathBuf, skip_origin_validation: bool, @@ -156,7 +164,7 @@ impl Server { let origin = format!("{}", addr); let port = addr.port(); let ws = ws::Builder::new().with_settings(config).build( - session::Factory::new(handler, origin, port, authcodes_path, skip_origin_validation, stats, meta_extractor) + session::Factory::new(handler, remote, origin, port, authcodes_path, skip_origin_validation, stats, meta_extractor) )?; let panic_handler = PanicHandler::new_in_arc(); diff --git a/signer/src/ws_server/session.rs b/signer/src/ws_server/session.rs index 5194855ab..91984ff05 100644 --- a/signer/src/ws_server/session.rs +++ b/signer/src/ws_server/session.rs @@ -21,8 +21,9 @@ use std::sync::Arc; use std::str::FromStr; use authcode_store::AuthCodes; -use jsonrpc_core::{Metadata, Middleware}; -use jsonrpc_core::reactor::RpcHandler; +use jsonrpc_core::{Metadata, Middleware, MetaIoHandler}; +use jsonrpc_core::futures::Future; +use jsonrpc_server_utils::tokio_core::reactor::Remote; use rpc::informant::RpcStats; use util::{H256, version}; use ws; @@ -145,7 +146,8 @@ pub struct Session, T> { self_origin: String, self_port: u16, authcodes_path: PathBuf, - handler: RpcHandler, + handler: Arc>, + remote: Remote, file_handler: Arc, stats: Option>, meta_extractor: T, @@ -237,7 +239,7 @@ impl, T: MetaExtractor> ws::Handler for Session // TODO [ToDr] Move to on_connect let metadata = self.meta_extractor.extract_metadata(&self.session_id); - self.handler.handle_request(req, metadata, move |response| { + let future = self.handler.handle_request(req, metadata).map(move |response| { if let Some(result) = response { let res = out.send(result); if let Err(e) = res { @@ -245,12 +247,14 @@ impl, T: MetaExtractor> ws::Handler for Session } } }); + self.remote.spawn(move |_| future); Ok(()) } } pub struct Factory, T> { - handler: RpcHandler, + handler: Arc>, + remote: Remote, skip_origin_validation: bool, self_origin: String, self_port: u16, @@ -262,7 +266,8 @@ pub struct Factory, T> { impl, T> Factory { pub fn new( - handler: RpcHandler, + handler: MetaIoHandler, + remote: Remote, self_origin: String, self_port: u16, authcodes_path: PathBuf, @@ -271,7 +276,8 @@ impl, T> Factory { meta_extractor: T, ) -> Self { Factory { - handler: handler, + handler: Arc::new(handler), + remote: remote, skip_origin_validation: skip_origin_validation, self_origin: self_origin, self_port: self_port, @@ -293,6 +299,7 @@ impl, T: MetaExtractor> ws::Factory for Factory session_id: 0.into(), out: sender, handler: self.handler.clone(), + remote: self.remote.clone(), skip_origin_validation: self.skip_origin_validation, self_origin: self.self_origin.clone(), self_port: self.self_port, diff --git a/stratum/src/lib.rs b/stratum/src/lib.rs index 59964773c..8aac33655 100644 --- a/stratum/src/lib.rs +++ b/stratum/src/lib.rs @@ -44,8 +44,8 @@ pub use traits::{ }; use jsonrpc_tcp_server::{ - Server as JsonRpcServer, RequestContext, MetaExtractor, Dispatcher, - PushMessageError + Server as JsonRpcServer, ServerBuilder as JsonRpcServerBuilder, + RequestContext, MetaExtractor, Dispatcher, PushMessageError, }; use jsonrpc_core::{MetaIoHandler, Params, to_value, Value, Metadata, Compatibility}; use jsonrpc_macros::IoDelegate; @@ -57,6 +57,8 @@ use util::{H256, Hashable, RwLock, RwLockReadGuard}; type RpcResult = BoxFuture; +const NOTIFY_COUNTER_INITIAL: u32 = 16; + struct StratumRpc { stratum: RwLock>>, } @@ 
-112,7 +114,7 @@ impl MetaExtractor for PeerMetaExtractor { } pub struct Stratum { - rpc_server: JsonRpcServer, + rpc_server: Option, /// Subscribed clients subscribers: RwLock>, /// List of workers supposed to receive job update @@ -129,7 +131,11 @@ pub struct Stratum { tcp_dispatcher: Dispatcher, } -const NOTIFY_COUNTER_INITIAL: u32 = 16; +impl Drop for Stratum { + fn drop(&mut self) { + self.rpc_server.take().map(|server| server.close()); + } +} impl Stratum { pub fn start( @@ -148,12 +154,14 @@ impl Stratum { let mut handler = MetaIoHandler::::with_compatibility(Compatibility::Both); handler.extend_with(delegate); - let server = JsonRpcServer::new(addr.clone(), Arc::new(handler)) - .extractor(Arc::new(PeerMetaExtractor) as Arc>); + let server = JsonRpcServerBuilder::new(handler) + .session_meta_extractor(PeerMetaExtractor); + let tcp_dispatcher = server.dispatcher(); + let server = server.start(addr)?; let stratum = Arc::new(Stratum { - tcp_dispatcher: server.dispatcher(), - rpc_server: server, + tcp_dispatcher: tcp_dispatcher, + rpc_server: Some(server), subscribers: RwLock::new(Vec::new()), job_que: RwLock::new(HashSet::new()), dispatcher: dispatcher, @@ -162,10 +170,6 @@ impl Stratum { notify_counter: RwLock::new(NOTIFY_COUNTER_INITIAL), }); *rpc.stratum.write() = Some(stratum.clone()); - - let running_stratum = stratum.clone(); - ::std::thread::spawn(move || running_stratum.rpc_server.run()); - Ok(stratum) } diff --git a/util/reactor/src/lib.rs b/util/reactor/src/lib.rs index 73ce9e404..c1d7f8631 100644 --- a/util/reactor/src/lib.rs +++ b/util/reactor/src/lib.rs @@ -24,7 +24,7 @@ use std::thread; use std::sync::mpsc; use std::time::Duration; use futures::{Future, IntoFuture}; -use self::tokio_core::reactor::{Remote as TokioRemote, Timeout}; +pub use tokio_core::reactor::{Remote as TokioRemote, Timeout}; /// Event Loop for futures. /// Wrapper around `tokio::reactor::Core`. @@ -47,7 +47,7 @@ impl EventLoop { let remote = rx.recv().expect("tx is transfered to a newly spawned thread."); EventLoop { - remote: Remote{ + remote: Remote { inner: Mode::Tokio(remote), }, handle: EventLoopHandle { From 599f81daa9193529db06c6e2d1df180ce8c2a549 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 13 Mar 2017 16:06:58 +0100 Subject: [PATCH 33/89] split request filling into fill,complete --- ethcore/light/src/types/request/builder.rs | 9 +- ethcore/light/src/types/request/mod.rs | 291 +++++++++++---------- 2 files changed, 155 insertions(+), 145 deletions(-) diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index cdd3a086f..77f1389c2 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -105,9 +105,8 @@ impl Requests { if self.answered == self.requests.len() { None } else { - let outputs = &self.outputs; Some(self.requests[self.answered].clone() - .fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + .complete() .expect("All outputs checked as invariant of `Requests` object; qed")) } } @@ -130,6 +129,12 @@ impl Requests { }); self.answered += 1; + + // fill as much of the next request as we can. 
+ if let Some(ref mut req) = self.requests.get_mut(self.answered) { + req.fill(|req_idx, out_idx| outputs.get(&(req_idx, out_idx)).cloned().ok_or(NoSuchOutput)) + } + Ok(()) } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 165dff742..1ebe1c75b 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -88,6 +88,16 @@ pub enum Field { BackReference(usize, usize), } +impl Field { + // attempt conversion into scalar value. + fn into_scalar(self) -> Result { + match self { + Field::Scalar(val) => Ok(val), + _ => Err(NoSuchOutput), + } + } +} + impl From for Field { fn from(val: T) -> Self { Field::Scalar(val) @@ -318,19 +328,30 @@ impl IncompleteRequest for Request { } } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - Ok(match self { - Request::Headers(req) => CompleteRequest::Headers(req.fill(oracle)?), - Request::HeaderProof(req) => CompleteRequest::HeaderProof(req.fill(oracle)?), - Request::Receipts(req) => CompleteRequest::Receipts(req.fill(oracle)?), - Request::Body(req) => CompleteRequest::Body(req.fill(oracle)?), - Request::Account(req) => CompleteRequest::Account(req.fill(oracle)?), - Request::Storage(req) => CompleteRequest::Storage(req.fill(oracle)?), - Request::Code(req) => CompleteRequest::Code(req.fill(oracle)?), - Request::Execution(req) => CompleteRequest::Execution(req.fill(oracle)?), - }) + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + match *self { + Request::Headers(ref mut req) => req.fill(oracle), + Request::HeaderProof(ref mut req) => req.fill(oracle), + Request::Receipts(ref mut req) => req.fill(oracle), + Request::Body(ref mut req) => req.fill(oracle), + Request::Account(ref mut req) => req.fill(oracle), + Request::Storage(ref mut req) => req.fill(oracle), + Request::Code(ref mut req) => req.fill(oracle), + Request::Execution(ref mut req) => req.fill(oracle), + } + } + + fn complete(self) -> Result { + match self { + Request::Headers(req) => req.complete().map(CompleteRequest::Headers), + Request::HeaderProof(req) => req.complete().map(CompleteRequest::HeaderProof), + Request::Receipts(req) => req.complete().map(CompleteRequest::Receipts), + Request::Body(req) => req.complete().map(CompleteRequest::Body), + Request::Account(req) => req.complete().map(CompleteRequest::Account), + Request::Storage(req) => req.complete().map(CompleteRequest::Storage), + Request::Code(req) => req.complete().map(CompleteRequest::Code), + Request::Execution(req) => req.complete().map(CompleteRequest::Execution), + } } } @@ -486,13 +507,16 @@ pub trait IncompleteRequest: Sized { /// Note that this request will produce the following outputs. fn note_outputs(&self, f: F) where F: FnMut(usize, OutputKind); - /// Fill the request. + /// Fill fields of the request. /// /// This function is provided an "output oracle" which allows fetching of /// prior request outputs. - /// Only outputs previously checked with `check_outputs` will be available. - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result; + /// Only outputs previously checked with `check_outputs` may be available. + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result; + + /// Attempt to convert this request into its complete variant. + /// Will succeed if all fields have been filled, will fail otherwise. + fn complete(self) -> Result; } /// Header request. 
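To make the new two-phase contract concrete, here is a minimal, self-contained sketch of the intended usage, with simplified stand-in types rather than the real Field/Output/request definitions: fill resolves back-references in place and quietly leaves unresolved fields alone, while complete succeeds only once every field is a scalar.

// Sketch only: simplified stand-ins for the request module's types.
#[derive(Debug)]
struct NoSuchOutput;

enum Field<T> {
    Scalar(T),
    BackReference(usize, usize),
}

impl<T> Field<T> {
    // Attempt conversion into a scalar value, as in the new `into_scalar`.
    fn into_scalar(self) -> Result<T, NoSuchOutput> {
        match self {
            Field::Scalar(val) => Ok(val),
            _ => Err(NoSuchOutput),
        }
    }
}

// An incomplete request with a single back-referenceable field.
struct Incomplete { hash: Field<u64> }
struct Complete { hash: u64 }

impl Incomplete {
    // fill: try to resolve back-references; keep them if the oracle fails.
    fn fill<F>(&mut self, oracle: F) where F: Fn(usize, usize) -> Result<u64, NoSuchOutput> {
        if let Field::BackReference(req, idx) = self.hash {
            self.hash = match oracle(req, idx) {
                Ok(val) => Field::Scalar(val),
                Err(_) => Field::BackReference(req, idx),
            };
        }
    }

    // complete: succeeds only if every field has been resolved to a scalar.
    fn complete(self) -> Result<Complete, NoSuchOutput> {
        Ok(Complete { hash: self.hash.into_scalar()? })
    }
}

fn main() {
    let mut req = Incomplete { hash: Field::BackReference(0, 0) };

    // Resolve against earlier outputs, then convert to the complete form.
    req.fill(|_, _| Ok(42));
    let complete = req.complete().expect("all fields filled");
    assert_eq!(complete.hash, 42);
}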
@@ -551,25 +575,24 @@ pub mod header { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) { } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let start = match self.start { - Field::Scalar(start) => start, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash.into(), - Output::Number(num) => num.into(), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.start { + self.start = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + Ok(Output::Number(num)) => Field::Scalar(num.into()), + Err(_) => Field::BackReference(req, idx), } - }; + } + } + fn complete(self) -> Result { Ok(Complete { - start: start, + start: self.start.into_scalar()?, skip: self.skip, max: self.max, reverse: self.reverse, }) } - } /// A complete header request. @@ -671,22 +694,20 @@ pub mod header_proof { note(0, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let num = match self.num { - Field::Scalar(num) => num, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Number(num) => num, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.num { + self.num = match oracle(req, idx) { + Ok(Output::Number(num)) => Field::Scalar(num.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - num: num, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + num: self.num.into_scalar()?, + }) + } } /// A complete header proof request. @@ -779,19 +800,18 @@ pub mod block_receipts { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let hash = match self.hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Number(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), } - }; + } + } + fn complete(self) -> Result { Ok(Complete { - hash: hash, + hash: self.hash.into_scalar()?, }) } } @@ -875,22 +895,20 @@ pub mod block_body { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let hash = match self.hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput), + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.hash { + self.hash = match oracle(req, idx) { + Ok(Output::Hash(hash)) => Field::Scalar(hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - hash: hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + hash: self.hash.into_scalar()?, + }) + } } /// A complete block body request. @@ -991,31 +1009,28 @@ pub mod account { f(1, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? 
{ - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let address_hash = match self.address_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - address_hash: address_hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + }) + } } /// A complete request for an account. @@ -1138,40 +1153,36 @@ pub mod storage { f(0, OutputKind::Hash); } - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let address_hash = match self.address_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.address_hash { + self.address_hash = match oracle(req, idx) { + Ok(Output::Hash(address_hash)) => Field::Scalar(address_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let key_hash = match self.key_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.key_hash { + self.key_hash = match oracle(req, idx) { + Ok(Output::Hash(key_hash)) => Field::Scalar(key_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - address_hash: address_hash, - key_hash: key_hash - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + address_hash: self.address_hash.into_scalar()?, + key_hash: self.key_hash.into_scalar()?, + }) + } } /// A complete request for a storage proof. @@ -1272,31 +1283,28 @@ pub mod contract_code { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? 
{ - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; + } - let code_hash = match self.code_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + if let Field::BackReference(req, idx) = self.code_hash { + self.code_hash = match oracle(req, idx) { + Ok(Output::Hash(code_hash)) => Field::Scalar(code_hash.into()), + _ => Field::BackReference(req, idx), } - }; - - Ok(Complete { - block_hash: block_hash, - code_hash: code_hash, - }) + } } + fn complete(self) -> Result { + Ok(Complete { + block_hash: self.block_hash.into_scalar()?, + code_hash: self.code_hash.into_scalar()?, + }) + } } /// A complete request. @@ -1411,19 +1419,17 @@ pub mod execution { fn note_outputs(&self, _: F) where F: FnMut(usize, OutputKind) {} - fn fill(self, oracle: F) -> Result - where F: Fn(usize, usize) -> Result - { - let block_hash = match self.block_hash { - Field::Scalar(hash) => hash, - Field::BackReference(req, idx) => match oracle(req, idx)? { - Output::Hash(hash) => hash, - _ => return Err(NoSuchOutput)?, + fn fill(&mut self, oracle: F) where F: Fn(usize, usize) -> Result { + if let Field::BackReference(req, idx) = self.block_hash { + self.block_hash = match oracle(req, idx) { + Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()), + _ => Field::BackReference(req, idx), } - }; - + } + } + fn complete(self) -> Result { Ok(Complete { - block_hash: block_hash, + block_hash: self.block_hash.into_scalar()?, from: self.from, action: self.action, gas: self.gas, @@ -1432,7 +1438,6 @@ pub mod execution { data: self.data, }) } - } /// A complete request. 
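Together with the earlier builder change, the overall flow is: supplying an answer records its outputs and eagerly fills the next queued request, so peeking at the head of the queue only needs complete(). A compressed sketch of that flow under assumed, hypothetical types (this is not the real Requests implementation):

use std::collections::HashMap;

#[derive(Debug)]
struct NoSuchOutput;

// A queued request either still waits on a back-reference
// (req_idx, out_idx) or has been resolved to a concrete value.
enum Queued {
    Needs(usize, usize),
    Ready(u64),
}

struct Requests {
    requests: Vec<Queued>,
    outputs: HashMap<(usize, usize), u64>,
    answered: usize,
}

impl Requests {
    // Record the outputs of the answered request, then fill as much of the
    // next request as possible, mirroring the new `supply_answer` behaviour.
    fn supply_answer(&mut self, outputs: Vec<((usize, usize), u64)>) {
        for (key, val) in outputs {
            self.outputs.insert(key, val);
        }
        self.answered += 1;

        if let Some(req) = self.requests.get_mut(self.answered) {
            if let Queued::Needs(r, o) = *req {
                if let Some(&val) = self.outputs.get(&(r, o)) {
                    *req = Queued::Ready(val);
                }
            }
        }
    }

    // The head of the queue is expected to be fully filled by now,
    // so completing it is a plain conversion rather than another lookup pass.
    fn next_complete(&self) -> Option<Result<u64, NoSuchOutput>> {
        self.requests.get(self.answered).map(|req| match *req {
            Queued::Ready(val) => Ok(val),
            Queued::Needs(..) => Err(NoSuchOutput),
        })
    }
}

fn main() {
    let mut reqs = Requests {
        requests: vec![Queued::Ready(1), Queued::Needs(0, 0)],
        outputs: HashMap::new(),
        answered: 0,
    };

    // Answering request 0 produces output (0, 0) = 7, which immediately
    // fills the back-reference in request 1.
    reqs.supply_answer(vec![((0, 0), 7)]);
    assert_eq!(reqs.next_complete().unwrap().unwrap(), 7);
}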
From 491eeb9878341336071a0e9de80e4afc758cc5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 16 Mar 2017 12:48:51 +0100 Subject: [PATCH 34/89] Better invalid encoding messages --- Cargo.lock | 310 +++++++++++++++-------------- Cargo.toml | 2 +- dapps/src/api/api.rs | 2 +- dapps/src/rpc.rs | 6 +- ethcore/light/src/on_demand/mod.rs | 18 +- ethcore/src/spec/spec.rs | 2 +- ipfs/Cargo.toml | 2 +- ipfs/src/lib.rs | 16 +- json/src/hash.rs | 8 +- json/src/uint.rs | 10 +- parity/rpc.rs | 20 +- rpc/src/lib.rs | 15 +- rpc/src/v1/impls/signing.rs | 12 +- rpc/src/v1/types/hash.rs | 9 +- rpc/src/v1/types/uint.rs | 16 +- rpc_client/Cargo.toml | 2 +- stratum/Cargo.toml | 7 +- util/reactor/src/lib.rs | 7 +- 18 files changed, 239 insertions(+), 225 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 257872770..8be5bd809 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ dependencies = [ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -192,6 +192,15 @@ dependencies = [ "stable-heap 0.1.0 (git+https://github.com/carllerche/stable-heap?rev=3c5cd1ca47)", ] +[[package]] +name = "bytes" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "cfg-if" version = "0.1.0" @@ -445,7 +454,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -555,7 +564,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-network 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -621,7 +630,7 @@ dependencies = [ "ethstore 0.1.0", "ethsync 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -688,15 +697,14 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 
6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", "semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -851,7 +859,7 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -870,11 +878,8 @@ dependencies = [ [[package]] name = "futures" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "futures-cpupool" @@ -882,7 +887,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1050,6 +1055,15 @@ name = "integer-encoding" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "iovec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ipc-common-types" version = "1.7.0" @@ -1083,47 +1097,22 @@ name = "itoa" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "jsonrpc-core" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "jsonrpc-core" version = "7.0.0" -source = 
"git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "jsonrpc-http-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1136,33 +1125,19 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "jsonrpc-macros" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" -dependencies = [ - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1172,7 +1147,7 @@ dependencies = [ [[package]] name = 
"jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1182,25 +1157,24 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#e9b58f07619c77de8f304c0589be12a705b20971" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-tcp-server" -version = "6.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6#86d7a89c85f324b5f6671315d9b71010ca995300" +version = "7.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" dependencies = [ - "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", - "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1228,6 +1202,11 @@ name = "lazycell" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "lazycell" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "libc" version = "0.2.16" @@ -1316,38 +1295,6 @@ dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "mio" -version = "0.5.1" -source = "git+https://github.com/ethcore/mio?branch=v0.5.x#3842d3b250ffd7bd9b16f9586b875ddcbac2b0dd" -dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "mio" -version = 
"0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "mio" version = "0.6.0-dev" @@ -1356,7 +1303,7 @@ dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)", @@ -1372,7 +1319,7 @@ dependencies = [ "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1381,32 +1328,56 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "mio-named-pipes" +version = "0.1.4" +source = "git+https://github.com/alexcrichton/mio-named-pipes#903dc2f7eac6700c62bfdda258a599db13a9228f" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
[[package]] name = "mio-uds" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miow" -version = "0.1.3" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miow" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1484,15 +1455,6 @@ dependencies = [ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "nix" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "nix" version = "0.6.0" @@ -1689,7 +1651,7 @@ dependencies = [ "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1706,7 +1668,7 @@ dependencies = [ "ethcore 1.7.0", "ethcore-util 1.7.0", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1731,8 +1693,8 @@ dependencies = [ name = "parity-reactor" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1742,8 +1704,8 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-signer 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1753,6 +1715,22 @@ dependencies = [ "ws 0.5.3 
(git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", ] +[[package]] +name = "parity-tokio-ipc" +version = "0.1.0" +source = "git+https://github.com/nikvolf/parity-tokio-ipc#3d4234de6bdc78688ef803935111003080fd5375" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", + "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)", + "tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)", + "tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-ui" version = "1.7.0" @@ -2060,7 +2038,7 @@ dependencies = [ "ethcore-bigint 0.1.2", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc-client 1.4.0", "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2239,11 +2217,6 @@ name = "siphasher" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "slab" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "slab" version = "0.2.0" @@ -2408,29 +2381,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "tokio-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-io" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-line" +version = "0.1.0" +source = "git+https://github.com/tokio-rs/tokio-line#482614ae0c82daf584727ae65a80d854fe861f81" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-named-pipes" +version = "0.1.0" +source = "git+https://github.com/alexcrichton/tokio-named-pipes#3a22f8fc9a441b548aec25bd5df3b1e0ab99fabe" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-named-pipes 0.1.4 
(git+https://github.com/alexcrichton/mio-named-pipes)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-proto" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2439,7 +2443,7 @@ name = "tokio-service" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2447,11 +2451,11 @@ name = "tokio-uds" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2662,6 +2666,7 @@ dependencies = [ "checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" +"checksum bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "46112a0060ae15e3a3f9a445428a53e082b91215b744fa27a1948842f4a64b96" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e53e6cdfa5ca294863e8c8a32a7cdb4dc0a442c8971d47a0e75b6c27ea268a6a" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" @@ -2684,7 +2689,7 @@ dependencies = [ "checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c1913eb7083840b1bbcbf9631b7fda55eaf35fe7ead13cca034e8946f9e2bc41" +"checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" "checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82" "checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" @@ -2701,23 +2706,22 @@ dependencies = [ "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" "checksum igd 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c8c12b1795b8b168f577c45fa10379b3814dcb11b7ab702406001f0d63f40484" "checksum integer-encoding 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a053c9c7dcb7db1f2aa012c37dc176c62e4cdf14898dee0eecc606de835b8acb" +"checksum iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be" "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" "checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git?branch=parity-1.6)" = "" +"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b" +"checksum lazycell 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ec38a5c22f1ef3e30d2642aa875620d60edeef36cef43c4739d86215ce816331" "checksum libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)" = "408014cace30ee0f767b1c4517980646a573ec61a57957aeeabcac8ac0a02e8d" "checksum libusb 0.3.0 (git+https://github.com/ethcore/libusb-rs)" = "" "checksum libusb-sys 0.2.3 (git+https://github.com/ethcore/libusb-sys)" = "" @@ -2730,13 +2734,13 @@ dependencies = [ "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2" "checksum mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e50bf542f81754ef69e5cea856946a3819f7c09ea97b4903c8bc8a89f74e7b6" "checksum miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d1f4d337a01c32e1f2122510fed46393d53ca35a7f429cb0450abaedfa3ed54" -"checksum mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)" = "" -"checksum mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a637d1ca14eacae06296a008fa7ad955347e34efcb5891cfd8ba05491a37907e" "checksum mio 0.6.0-dev (git+https://github.com/ethcore/mio?branch=timer-fix)" = "" "checksum mio 0.6.1 (git+https://github.com/ethcore/mio)" = "" -"checksum mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "410a1a0ff76f5a226f1e4e3ff1756128e65cd30166e39c3892283e2ac09d5b67" +"checksum mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5b493dc9fd96bd2077f2117f178172b0765db4dfda3ea4d8000401e6d65d3e80" +"checksum mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)" = "" "checksum mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "78437f00d9615c366932cbfe79790b5c2945706ba67cf78378ffacc0069ed9de" -"checksum miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d5bfc6782530ac8ace97af10a540054a37126b63b0702ddaaa243b73b5745b9a" +"checksum miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3e690c5df6b2f60acd45d56378981e827ff8295562fc8d34f573deb267a59cd1" +"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum msdos_time 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c04b68cc63a8480fb2550343695f7be72effdec953a9d4508161c3e69041c7d8" "checksum multibase 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9c35dac080fd6e16a99924c8dfdef0af89d797dd851adab25feaffacf7850d6" "checksum multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "755d5a39bee3faaf649437e873beab334990221b2faf1f2e56ca10a9e4600235" @@ -2744,7 +2748,6 @@ dependencies = [ "checksum nanomsg-sys 0.5.0 (git+https://github.com/ethcore/nanomsg.rs.git)" = "" "checksum native-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa4e52995154bb6f0b41e4379a279482c9387c1632e3798ba4e511ef8c54ee09" "checksum net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "6a816012ca11cb47009693c1e0c6130e26d39e4d97ee2a13c50e868ec83e3204" -"checksum nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f05c2fc965fc1cd6b73fa57fa7b89f288178737f2f3ce9e63e4a6a141189000e" "checksum nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a7bb1da2be7da3cbffda73fc681d509ffd9e665af478d2bee1907cee0bc64b2" "checksum nix 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "a0d95c5fa8b641c10ad0b8887454ebaafa3c92b5cd5350f8fc693adafd178e7b" "checksum nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4d9a22dbcebdeef7bf275cbf444d6521d4e7a2fee187b72d80dba0817120dd8f" @@ -2766,6 +2769,7 @@ dependencies = [ "checksum order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" "checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7" "checksum parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98378dec0a185da2b7180308752f0bad73aaa949c3e0a3b0528d0e067945f7ab" +"checksum parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "" "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/ethcore/js-precompiled.git)" = "" "checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621" "checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068" @@ -2816,7 +2820,6 @@ dependencies = [ "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" "checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d" "checksum siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c44e42fa187b5a8782489cf7740cc27c3125806be2bf33563cf5e02e9533fcd" -"checksum slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d807fd58c4181bbabed77cb3b891ba9748241a552bcc5be698faaebefc54f46e" "checksum slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)" = "" "checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" @@ -2839,7 +2842,10 @@ dependencies = [ "checksum thread_local 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0694f51610ef7cfac7a1b81de7f1602ee5356e76541bcd62c40e71933338cab1" "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" -"checksum tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52416b3e937abac22a543a7f1c66bd37feb60137ff1ab42390fa02df85347e58" +"checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b" +"checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" +"checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "" +"checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" diff --git a/Cargo.toml b/Cargo.toml index 2a01d2c8e..8725aaa78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ app_dirs = "1.1.1" fdlimit = "0.1" hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index ce8f495e6..e07bd4535 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -117,7 +117,7 @@ impl RestApiRouter { impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { - self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains); + self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); if let Method::Options = *request.method() { self.handler = response::empty(); diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 0c95051e4..e7f5eef99 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -49,8 +49,10 @@ struct RpcEndpoint> { #[derive(Default)] struct NoopMiddleware; impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, _request: &hyper::server::Request) -> http::RequestMiddlewareAction { - http::RequestMiddlewareAction::Proceed + fn on_request(&self, request: &hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: request.headers().get::().is_none(), + } } } diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 25cde402b..585985f05 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -74,6 +74,8 @@ pub struct OnDemand { orphaned_requests: RwLock>, } +const RECEIVER_IN_SCOPE: &'static str = "Receiver is still in scope, so it's not dropped; qed"; + impl OnDemand { /// Create a new `OnDemand` service with the given cache. 
pub fn new(cache: Arc>) -> Self { @@ -95,7 +97,7 @@ impl OnDemand { }; match cached { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_number(ctx, req, ChtProofSender::Header(sender)), } receiver @@ -111,7 +113,7 @@ impl OnDemand { }; match cached { - Some(score) => sender.complete(score), + Some(score) => sender.send(score).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_number(ctx, req, ChtProofSender::ChainScore(sender)), } @@ -132,7 +134,7 @@ impl OnDemand { }; match cached { - (Some(hdr), Some(score)) => sender.complete((hdr, score)), + (Some(hdr), Some(score)) => sender.send((hdr, score)).expect(RECEIVER_IN_SCOPE), _ => self.dispatch_header_by_number(ctx, req, ChtProofSender::Both(sender)), } @@ -183,7 +185,7 @@ impl OnDemand { pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch_header_by_hash(ctx, req, sender), } receiver @@ -241,7 +243,7 @@ impl OnDemand { stream.begin_list(0); stream.begin_list(0); - sender.complete(encoded::Block::new(stream.out())) + sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE) } else { match self.cache.lock().block_body(&req.hash) { Some(body) => { @@ -293,10 +295,10 @@ impl OnDemand { // fast path for empty receipts. if req.0.receipts_root() == SHA3_NULL_RLP { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { match self.cache.lock().block_receipts(&req.0.hash()) { - Some(receipts) => sender.complete(receipts), + Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), None => self.dispatch_block_receipts(ctx, req, sender), } } @@ -381,7 +383,7 @@ impl OnDemand { // fast path for no code. if req.code_hash == ::util::sha3::SHA3_EMPTY { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE) } else { self.dispatch_code(ctx, req, sender); } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 078908db4..76e925a85 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -323,7 +323,7 @@ impl Spec { pub fn load(reader: R) -> Result where R: Read { match ethjson::spec::Spec::load(reader) { Ok(spec) => Ok(spec.into()), - _ => Err("Spec json is invalid".into()), + Err(e) => Err(format!("Spec json is invalid: {}", e)), } } diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 6ce5518c3..1443c8cf2 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } rlp = { path = "../util/rlp" } mime = "0.2" hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 287abb27c..3d79c00fb 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -72,17 +72,6 @@ impl IpfsHandler { client: client, } } - - fn is_origin_allowed(&self, origin_provided: bool) -> bool { - match (origin_provided, self.cors_header.as_ref()) { - // Request without Origin are always OK. - (false, _) => true, - // If there is a cors header to be returned it's ok. 
- (true, Some(_)) => true, - // If origin is provided and we won't return cors header it's bad. - (true, None) => false, - } - } } /// Implement Hyper's HTTP handler @@ -92,7 +81,6 @@ impl Handler for IpfsHandler { return Next::write(); } - self.cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); @@ -100,11 +88,13 @@ impl Handler for IpfsHandler { return Next::write(); } - if !self.is_origin_allowed(req.headers().get::().is_some()) { + let cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); + if cors_header == jsonrpc_http_server::CorsHeader::Invalid { self.out = Out::Bad("Disallowed Origin header"); return Next::write(); } + self.cors_header = cors_header.into(); let (path, query) = match *req.uri() { RequestUri::AbsolutePath { ref path, ref query } => (path, query.as_ref().map(AsRef::as_ref)), diff --git a/json/src/hash.rs b/json/src/hash.rs index ae6ba1a81..78fa77bd9 100644 --- a/json/src/hash.rs +++ b/json/src/hash.rs @@ -59,11 +59,11 @@ macro_rules! impl_hash { let value = match value.len() { 0 => $inner::from(0), 2 if value == "0x" => $inner::from(0), - _ if value.starts_with("0x") => $inner::from_str(&value[2..]).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ if value.starts_with("0x") => $inner::from_str(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, - _ => $inner::from_str(value).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ => $inner::from_str(value).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, }; diff --git a/json/src/uint.rs b/json/src/uint.rs index 281820d78..6b206b380 100644 --- a/json/src/uint.rs +++ b/json/src/uint.rs @@ -63,7 +63,7 @@ impl Visitor for UintVisitor { type Value = Uint; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a hex encoded uint") + write!(formatter, "a hex encoded or decimal uint") } fn visit_u64(self, value: u64) -> Result where E: Error { @@ -74,11 +74,11 @@ impl Visitor for UintVisitor { let value = match value.len() { 0 => U256::from(0), 2 if value.starts_with("0x") => U256::from(0), - _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|_| { - Error::custom(format!("Invalid hex value {}.", value).as_str()) + _ if value.starts_with("0x") => U256::from_str(&value[2..]).map_err(|e| { + Error::custom(format!("Invalid hex value {}: {}", value, e).as_str()) })?, - _ => U256::from_dec_str(value).map_err(|_| { - Error::custom(format!("Invalid decimal value {}.", value).as_str()) + _ => U256::from_dec_str(value).map_err(|e| { + Error::custom(format!("Invalid decimal value {}: {:?}", value, e).as_str()) })? 
}; diff --git a/parity/rpc.rs b/parity/rpc.rs index b0af8aa0b..a435f24db 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -20,7 +20,7 @@ use std::net::SocketAddr; use std::io; use dir::default_data_path; -use ethcore_rpc::{self as rpc, HttpServerError, IpcServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; +use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; use helpers::parity_ipc_path; use hyper; @@ -100,6 +100,15 @@ impl rpc::HttpMetaExtractor for RpcExtractor { } } +impl rpc::IpcMetaExtractor for RpcExtractor { + fn extract(&self, _req: &rpc::IpcRequestContext) -> Metadata { + let mut metadata = Metadata::default(); + // TODO [ToDr] Extract proper session id when it's available in context. + metadata.origin = Origin::Ipc(1.into()); + metadata + } +} + pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); @@ -136,17 +145,16 @@ pub fn setup_http_rpc_server( } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result>, String> { +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); } Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result, String> { +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { let handler = setup_apis(apis, dependencies); let remote = dependencies.remote.clone(); - match rpc::start_ipc(addr, handler, remote) { - Err(IpcServerError::Io(io_error)) => Err(format!("RPC io error: {}", io_error)), - Err(any_error) => Err(format!("Rpc error: {:?}", any_error)), + match rpc::start_ipc(addr, handler, remote, RpcExtractor) { + Err(io_error) => Err(format!("RPC io error: {}", io_error)), Ok(server) => Ok(server) } } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 7a9ee5a22..9dc2f6f29 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -63,7 +63,7 @@ extern crate ethcore_devtools as devtools; pub mod v1; -pub use ipc::{Server as IpcServer, Error as IpcServerError}; +pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; @@ -95,16 +95,19 @@ pub fn start_http( } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_ipc( +pub fn start_ipc( addr: &str, handler: H, remote: tokio_core::reactor::Remote, -) -> Result, ipc::Error> where + extractor: T, +) -> ::std::io::Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, H: Into>, + T: IpcMetaExtractor, { - let server = ipc::Server::with_remote(addr, handler, ipc::UninitializedRemote::Shared(remote))?; - server.run_async()?; - Ok(server) + ipc::ServerBuilder::new(handler) + .event_loop_remote(remote) + .session_metadata_extractor(extractor) + .start(addr) } diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index d737131a6..c322588c5 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -200,11 +200,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response { + ignore_error(match response { Ok(RpcConfirmationResponse::Signature(sig)) => ready.complete(Ok(sig)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) @@ -222,11 +222,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response { + ignore_error(match response { Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.complete(Ok(hash)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) @@ -244,11 +244,11 @@ impl EthSigning for SigningQueueClient { res.then(move |res| { handle_dispatch(res, move |response| { - match response { + ignore_error(match response { Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.complete(Ok(tx)), Err(e) => ready.complete(Err(e)), e => ready.complete(Err(errors::internal("Unexpected result.", e))), - } + }) }); p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index c96a3433b..791042fe0 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -124,13 +124,16 @@ macro_rules! impl_hash { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, padded, hex-encoded hash of type {}", stringify!($name)) + write!(formatter, "a 0x-prefixed, padded, hex-encoded hash with length {}", $size * 2) } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { + if value.len() < 2 || &value[0..2] != "0x" { + return Err(E::custom("Expected hex-encoded hash with 0x prefix.")); + } if value.len() != 2 + $size * 2 { - return Err(E::custom("Invalid length.")); + return Err(E::invalid_length(value.len() - 2, &self)); } match value[2..].from_hex() { @@ -139,7 +142,7 @@ macro_rules! impl_hash { result.copy_from_slice(v); Ok($name(result)) }, - _ => Err(E::custom("Invalid hex value.")) + Err(e) => Err(E::custom(format!("Invalid hex value: {:?}", e))), } } diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index ba3b83fa7..e646ec6c2 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -74,20 +74,20 @@ macro_rules! 
impl_uint { type Value = $name; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed, hex-encoded number of type {}", stringify!($name)) + write!(formatter, "a 0x-prefixed, hex-encoded number of length {}", $size*16) } fn visit_str(self, value: &str) -> Result where E: serde::de::Error { + if value.len() < 2 || &value[0..2] != "0x" { + return Err(E::custom("Use hex-encoded numbers with 0x prefix.")) + } + // 0x + len - if value.len() > 2 + $size * 16 || value.len() < 2 { - return Err(E::custom("Invalid length.")); + if value.len() > 2 + $size * 16 { + return Err(E::invalid_length(value.len() - 2, &self)); } - if &value[0..2] != "0x" { - return Err(E::custom("Use hex encoded numbers with 0x prefix.")) - } - - $other::from_str(&value[2..]).map($name).map_err(|_| E::custom("Invalid hex value.")) + $other::from_str(&value[2..]).map($name).map_err(|e| E::custom(&format!("Invalid hex value: {:?}", e))) } fn visit_string(self, value: String) -> Result where E: serde::de::Error { diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index 77f9c3edf..a70816f9e 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -14,7 +14,7 @@ serde = "0.9" serde_json = "0.9" tempdir = "0.3.5" url = "1.2.0" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } ethcore-rpc = { path = "../rpc" } ethcore-signer = { path = "../signer" } diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 1b309985a..201792340 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -11,10 +11,9 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "parity-1.6" } -mio = { git = "https://github.com/ethcore/mio", branch = "v0.5.x" } +jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } ethcore-util = { path = "../util" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" diff --git a/util/reactor/src/lib.rs b/util/reactor/src/lib.rs index c1d7f8631..e5f04d652 100644 --- a/util/reactor/src/lib.rs +++ b/util/reactor/src/lib.rs @@ -190,7 +190,7 @@ impl From for EventLoopHandle { impl Drop for EventLoopHandle { fn drop(&mut self) { - self.close.take().map(|v| v.complete(())); + self.close.take().map(|v| v.send(())); } } @@ -203,7 +203,8 @@ impl EventLoopHandle { /// Finishes this event loop. pub fn close(mut self) { - self.close.take() - .expect("Close is taken only in `close` and `drop`. `close` is consuming; qed").complete(()) + let _ = self.close.take() + .expect("Close is taken only in `close` and `drop`. 
`close` is consuming; qed") + .send(()); } } From 579cff478dbb293fb39ef429dc3c7097e9206fc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 16 Mar 2017 15:43:31 +0100 Subject: [PATCH 35/89] Fixing deprecated methods of tokio_core --- Cargo.lock | 57 ++++++++++++++++++------- dapps/Cargo.toml | 1 - dapps/src/lib.rs | 2 +- dapps/src/rpc.rs | 23 +++++----- dapps/src/tests/helpers/fetch.rs | 2 +- dapps/src/tests/rpc.rs | 5 +-- ethcore/Cargo.toml | 5 +-- ethcore/light/src/on_demand/mod.rs | 3 ++ rpc/src/v1/helpers/mod.rs | 1 + rpc/src/v1/helpers/oneshot.rs | 67 ++++++++++++++++++++++++++++++ rpc/src/v1/impls/signing.rs | 57 +++++++++++++------------ rpc/src/v1/tests/helpers/fetch.rs | 2 +- 12 files changed, 158 insertions(+), 67 deletions(-) create mode 100644 rpc/src/v1/helpers/oneshot.rs diff --git a/Cargo.lock b/Cargo.lock index 8be5bd809..7ffc74576 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -409,7 +409,7 @@ dependencies = [ "ethstore 0.1.0", "evmjit 1.7.0", "hardware-wallet 1.7.0", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -455,7 +455,6 @@ dependencies = [ "ethcore-util 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -661,7 +660,7 @@ dependencies = [ "ethcore-util 1.7.0", "ethcrypto 0.1.0", "ethkey 0.2.0", - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1001,7 +1000,26 @@ dependencies = [ [[package]] name = "hyper" -version = "0.10.4" +version = "0.10.0-a.0" +source = "git+https://github.com/paritytech/hyper#453c683b52208fefc32d29e4ac7c863439b2321f" +dependencies = [ + "cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rotor 0.6.3 (git+https://github.com/ethcore/rotor)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hyper" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "httparse 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -1012,7 +1030,7 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1024,7 +1042,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1100,7 +1118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1112,9 +1130,9 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1125,7 +1143,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1137,7 +1155,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1147,7 +1165,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -1157,7 +1175,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1168,7 +1186,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#4706ebc240f15eed85a2670660576541fdda7bbb" +source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -1951,7 +1969,7 @@ name = "reqwest" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "hyper-native-tls 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2479,6 +2497,11 @@ name = "traitobject" version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "traitobject" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "transient-hashmap" version = "0.1.0" @@ -2700,7 +2723,8 @@ dependencies = [ "checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" "checksum hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)" = "" -"checksum hyper 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)" = "220407e5a263f110ec30a071787c9535918fdfc97def5680c90013c3f30c38c1" +"checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" +"checksum hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)" = "43a15e3273b2133aaac0150478ab443fb89f15c3de41d8d93d8f3bb14bf560f6" "checksum hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9bf64f730d6ee4b0528a5f0a316363da9d8104318731509d4ccc86248f82b3" "checksum hyper-native-tls 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "afe68f772f0497a7205e751626bb8e1718568b58534b6108c73a74ef80483409" "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" @@ -2852,6 +2876,7 @@ dependencies = [ "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" "checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616" +"checksum traitobject 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" "checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0" "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" "checksum unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "13a5906ca2b98c799f4b1ab4557b76367ebd6ae5ef14930ec841c74aed5f3764" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 5bbad53c7..95e1f3f56 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -25,7 +25,6 @@ unicase = "1.3" url = "1.0" zip = { version = "0.1", default-features = false } -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } # TODO [ToDr] Temporary solution, server should be merged with RPC. diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index eca6fd991..252e1c3bb 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -21,7 +21,6 @@ extern crate base32; extern crate futures; -extern crate hyper; extern crate linked_hash_map; extern crate mime_guess; extern crate rand; @@ -78,6 +77,7 @@ use std::collections::HashMap; use jsonrpc_core::{Middleware, MetaIoHandler}; use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; +pub use jsonrpc_http_server::hyper; use ethcore_rpc::Metadata; use fetch::{Fetch, Client as FetchClient}; diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index e7f5eef99..6ddb31db0 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -46,15 +46,6 @@ struct RpcEndpoint> { allowed_hosts: Option>, } -#[derive(Default)] -struct NoopMiddleware; -impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &hyper::server::Request) -> http::RequestMiddlewareAction { - http::RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: request.headers().get::().is_none(), - } - } -} impl> Endpoint for RpcEndpoint { fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { @@ -72,10 +63,20 @@ impl> Endpoint for RpcEndpoint { } } +#[derive(Default)] +struct NoopMiddleware; +impl http::RequestMiddleware for NoopMiddleware { + fn on_request(&self, request: &http::hyper::server::Request) -> http::RequestMiddlewareAction { + http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: request.headers().get::().is_none(), + } + } +} + struct MetadataExtractor; impl HttpMetaExtractor for MetadataExtractor { - fn read_metadata(&self, request: &hyper::server::Request) -> Metadata { - let dapp_id = request.headers().get::() + fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { + let dapp_id = request.headers().get::() .map(|origin| format!("{}://{}", origin.scheme, origin.host)) .or_else(|| { // fallback to custom header, but only if origin is null diff --git a/dapps/src/tests/helpers/fetch.rs b/dapps/src/tests/helpers/fetch.rs index fcfd4db9c..e6e875c51 100644 --- a/dapps/src/tests/helpers/fetch.rs +++ b/dapps/src/tests/helpers/fetch.rs @@ -114,7 +114,7 @@ impl Fetch for FakeFetch { let data = response.lock().take().unwrap_or(b"Some content"); let cursor = 
io::Cursor::new(data); - tx.complete(fetch::Response::from_reader(cursor)); + tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); rx.map_err(|_| fetch::Error::Aborted).boxed() diff --git a/dapps/src/tests/rpc.rs b/dapps/src/tests/rpc.rs index 0dbba384c..2cc4ccb24 100644 --- a/dapps/src/tests/rpc.rs +++ b/dapps/src/tests/rpc.rs @@ -55,8 +55,8 @@ fn should_extract_metadata() { // given let mut io = MetaIoHandler::default(); io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into())); - assert_eq!(meta.dapp_id(), "https://parity.io/".into()); + assert_eq!(meta.origin, Origin::Dapps("".into())); + assert_eq!(meta.dapp_id(), "".into()); future::ok(Value::String("Hello World!".into())).boxed() }); let server = serve_with_rpc(io); @@ -68,7 +68,6 @@ fn should_extract_metadata() { POST /rpc/ HTTP/1.1\r\n\ Host: 127.0.0.1:8080\r\n\ Connection: close\r\n\ - Origin: https://parity.io/\r\n\ X-Parity-Origin: https://this.should.be.ignored\r\n\ Content-Type: application/json\r\n\ Content-Length: {}\r\n\ diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 64010fadf..ae029206a 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -44,10 +44,7 @@ ethcore-stratum = { path = "../stratum" } ethcore-bloom-journal = { path = "../util/bloom" } hardware-wallet = { path = "../hw" } stats = { path = "../util/stats" } - -[dependencies.hyper] -git = "https://github.com/ethcore/hyper" -default-features = false +hyper = { git = "https://github.com/paritytech/hyper", default-features = false } [features] jit = ["evmjit"] diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 585985f05..4941552fc 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -18,6 +18,9 @@ //! The request service is implemented using Futures. Higher level request handlers //! will take the raw data received here and extract meaningful results from it. +// TODO [ToDr] Suppressing deprecation warnings. Rob will fix the API anyway. +#![allow(deprecated)] + use std::collections::HashMap; use std::sync::Arc; diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index 1d6bd14f3..ff1bc9dbe 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -20,6 +20,7 @@ pub mod errors; pub mod block_import; pub mod dispatch; pub mod informant; +pub mod oneshot; mod network_settings; mod poll_manager; diff --git a/rpc/src/v1/helpers/oneshot.rs b/rpc/src/v1/helpers/oneshot.rs new file mode 100644 index 000000000..c128ccf55 --- /dev/null +++ b/rpc/src/v1/helpers/oneshot.rs @@ -0,0 +1,67 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use jsonrpc_core::Error; +use futures::{self, Future}; +use futures::sync::oneshot; +use v1::helpers::errors; + +pub type Res = Result; + +pub struct Sender { + sender: oneshot::Sender>, +} + +impl Sender { + pub fn send(self, data: Res) { + let res = self.sender.send(data); + if let Err(_) = res { + debug!(target: "rpc", "Responding to a no longer active request."); + } + } +} + +pub struct Receiver { + receiver: oneshot::Receiver>, +} + +impl Future for Receiver { + type Item = T; + type Error = Error; + + fn poll(&mut self) -> futures::Poll { + let res = self.receiver.poll(); + match res { + Ok(futures::Async::NotReady) => Ok(futures::Async::NotReady), + Ok(futures::Async::Ready(Ok(res))) => Ok(futures::Async::Ready(res)), + Ok(futures::Async::Ready(Err(err))) => Err(err), + Err(e) => { + debug!(target: "rpc", "Responding to a canceled request: {:?}", e); + Err(errors::internal("Request was canceled by client.", e)) + }, + } + } +} + +pub fn oneshot() -> (Sender, Receiver) { + let (tx, rx) = futures::oneshot(); + + (Sender { + sender: tx, + }, Receiver { + receiver: rx, + }) +} diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index c322588c5..f7a66f082 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -22,10 +22,10 @@ use util::{U256, Mutex}; use ethcore::account_provider::AccountProvider; -use futures::{self, future, BoxFuture, Future}; +use futures::{future, BoxFuture, Future}; use jsonrpc_core::Error; use v1::helpers::{ - errors, + errors, oneshot, DefaultAccount, SigningQueue, ConfirmationPromise, ConfirmationResult, SignerService }; @@ -167,21 +167,20 @@ impl ParitySigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); // when dispatch is complete res.then(move |res| { // register callback via the oneshot sender. handle_dispatch(res, move |response| { match response { - Ok(RpcConfirmationResponse::Decrypt(data)) => ready.complete(Ok(data)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), + Ok(RpcConfirmationResponse::Decrypt(data)) => ready.send(Ok(data)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), } }); - // and wait for that to resolve. 
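As a rough usage sketch of the pattern the new oneshot helper wraps up (plain futures 0.1.11+ here, not the v1::helpers::oneshot module itself): carry a Result through a oneshot channel, use send, the replacement for the deprecated complete, and turn cancellation into an ordinary error value. The RpcError enum is a stand-in invented for the example.

extern crate futures;

use futures::Future;
use futures::sync::oneshot;

#[derive(Debug)]
enum RpcError { Canceled }

fn main() {
    let (tx, rx) = oneshot::channel::<Result<u64, RpcError>>();

    // `send` reports whether the receiver is still interested; a dropped receiver is not fatal.
    if tx.send(Ok(42)).is_err() {
        // receiver hung up; the helper above just logs this case.
    }

    // Mirror the Receiver::poll logic above: a canceled channel becomes an error value,
    // and the inner Result is flattened into the future's item/error.
    let outcome = rx
        .map_err(|_| RpcError::Canceled)
        .and_then(|res| res)
        .wait();

    println!("{:?}", outcome);
}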
- p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } } @@ -196,18 +195,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::Signature(sig)) => ready.complete(Ok(sig)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } @@ -218,18 +217,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.complete(Ok(hash)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } @@ -240,18 +239,18 @@ impl EthSigning for SigningQueueClient { meta.origin, ); - let (ready, p) = futures::oneshot(); + let (ready, p) = oneshot::oneshot(); res.then(move |res| { handle_dispatch(res, move |response| { - ignore_error(match response { - Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.complete(Ok(tx)), - Err(e) => ready.complete(Err(e)), - e => ready.complete(Err(errors::internal("Unexpected result.", e))), - }) + match response { + Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)), + Err(e) => ready.send(Err(e)), + e => ready.send(Err(errors::internal("Unexpected result.", e))), + } }); - p.then(|result| futures::done(result.expect("Ready is never dropped nor canceled."))) + p }).boxed() } } diff --git a/rpc/src/v1/tests/helpers/fetch.rs b/rpc/src/v1/tests/helpers/fetch.rs index 58ac96bcb..236dae91b 100644 --- a/rpc/src/v1/tests/helpers/fetch.rs +++ b/rpc/src/v1/tests/helpers/fetch.rs @@ -35,7 +35,7 @@ impl Fetch for TestFetch { let (tx, rx) = futures::oneshot(); thread::spawn(move || { let cursor = io::Cursor::new(b"Some content"); - tx.complete(fetch::Response::from_reader(cursor)); + tx.send(fetch::Response::from_reader(cursor)).unwrap(); }); rx.map_err(|_| fetch::Error::Aborted).boxed() From cbb9314531be7b2ea2c2deb41f9ffafd83da19ca Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:23:59 +0100 Subject: [PATCH 36/89] use PIP messages in on_demand, old API --- Cargo.lock | 35 +- ethcore/light/src/lib.rs | 2 +- ethcore/light/src/on_demand/mod.rs | 610 ++++++++----------------- ethcore/light/src/on_demand/request.rs | 61 +-- ethcore/light/src/types/request/mod.rs | 4 +- 5 files changed, 227 insertions(+), 485 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6924cfe00..72d0b7778 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -445,7 +445,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -554,7 +554,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-network 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -620,7 +620,7 @@ dependencies = [ "ethstore 0.1.0", "ethsync 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-ipc-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -686,7 +686,7 @@ dependencies = [ "ethcore-ipc-codegen 1.7.0", "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-macros 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-tcp-server 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -849,7 +849,7 @@ dependencies = [ name = "fetch" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -868,11 +868,8 @@ dependencies = [ [[package]] name = "futures" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "futures-cpupool" @@ -880,7 +877,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1086,7 +1083,7 @@ name = "jsonrpc-core" version = "6.0.0" source = "git+https://github.com/ethcore/jsonrpc.git#86d7a89c85f324b5f6671315d9b71010ca995300" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1623,7 +1620,7 @@ dependencies = [ "ethabi 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "fetch 0.1.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1665,7 +1662,7 @@ dependencies = [ name = "parity-reactor" version = "0.1.0" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1676,7 +1673,7 @@ dependencies = [ "ethcore-rpc 1.7.0", "ethcore-signer 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 6.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1994,7 +1991,7 @@ dependencies = [ "ethcore-bigint 0.1.2", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc-client 1.4.0", "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2345,7 +2342,7 @@ name = "tokio-core" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2357,7 +2354,7 @@ name = "tokio-proto" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2373,7 +2370,7 @@ name = "tokio-service" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2606,7 +2603,7 @@ dependencies = [ "checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum futures 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = 
"c1913eb7083840b1bbcbf9631b7fda55eaf35fe7ead13cca034e8946f9e2bc41" +"checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" "checksum futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bb982bb25cd8fa5da6a8eb3a460354c984ff1113da82bcb4f0b0862b5795db82" "checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518" diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 81a974192..ada58d8de 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -35,7 +35,7 @@ pub mod client; pub mod cht; pub mod net; -//pub mod on_demand; +pub mod on_demand; pub mod transaction_queue; pub mod cache; diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 25cde402b..df8a6c6a9 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -31,12 +31,12 @@ use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, DBValue, RwLock, Mutex, U256}; +use util::{Bytes, DBValue, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; -use types::les_request::{self as les_request, Request as LesRequest}; +use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; pub mod request; @@ -46,24 +46,85 @@ struct Peer { capabilities: Capabilities, } +impl Peer { + // Whether a given peer can handle a specific request. + fn can_handle(&self, pending: &Pending) -> bool { + match *pending { + Pending::HeaderProof(ref req, _) => + self.capabilities.serve_headers && self.status.head_num > req.num(), + Pending::HeaderByHash(ref req, _) => self.capabilities.serve_headers, + Pending::Block(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::BlockReceipts(ref req, _) => + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + Pending::Account(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + Pending::Code(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + Pending::TxProof(ref req, _) => + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + } + } +} + // Which portions of a CHT proof should be sent. enum ChtProofSender { - Both(Sender<(encoded::Header, U256)>), - Header(Sender), + Both(Sender<(H256, U256)>), + Hash(Sender), ChainScore(Sender), } // Attempted request info and sender to put received value. enum Pending { - HeaderByNumber(request::HeaderByNumber, ChtProofSender), + HeaderProof(request::HeaderProof, ChtProofSender), HeaderByHash(request::HeaderByHash, Sender), Block(request::Body, Sender), BlockReceipts(request::BlockReceipts, Sender>), - Account(request::Account, Sender), + Account(request::Account, Sender>), Code(request::Code, Sender), TxProof(request::TransactionProof, Sender>), } +impl Pending { + // Create a network request. 
+ fn make_request(&self) -> NetworkRequest { + match *self { + Pending::HeaderByHash(ref req, _) => NetworkRequest::Headers(basic_request::IncompleteHeadersRequest { + start: basic_request::HashOrNumber::Hash(req.0).into(), + skip: 0, + max: 1, + reverse: false, + }), + Pending::HeaderProof(ref req, _) => NetworkRequest::HeaderProof(basic_request::IncompleteHeaderProofRequest { + num: req.num().into(), + }), + Pending::Block(ref req, _) => NetworkRequest::Body(basic_request::IncompleteBodyRequest { + hash: req.hash.into(), + }), + Pending::BlockReceipts(ref req, _) => NetworkRequest::Receipts(basic_request::IncompleteReceiptsRequest { + hash: req.0.hash().into(), + }), + Pending::Account(ref req, _) => NetworkRequest::Account(basic_request::IncompleteAccountRequest { + block_hash: req.header.hash().into(), + address_hash: ::util::Hashable::sha3(&req.address).into(), + }), + Pending::Code(ref req, _) => NetworkRequest::Code(basic_request::IncompleteCodeRequest { + block_hash: req.block_id.0.into(), + code_hash: req.code_hash.into(), + }), + Pending::TxProof(ref req, _) => NetworkRequest::Execution(basic_request::IncompleteExecutionRequest { + block_hash: req.header.hash().into(), + from: req.tx.sender(), + gas: req.tx.gas, + gas_price: req.tx.gas_price, + action: req.tx.action.clone(), + value: req.tx.value, + data: req.tx.data.clone(), + }), + } + } +} + /// On demand request service. See module docs for more details. /// Accumulates info about all peers' capabilities and dispatches /// requests to them accordingly. @@ -85,25 +146,25 @@ impl OnDemand { } } - /// Request a header by block number and CHT root hash. - /// Returns the header. - pub fn header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + /// Request a header's hash by block number and CHT root hash. + /// Returns the hash. + pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); - cache.block_hash(&req.num()).and_then(|hash| cache.block_header(&hash)) + cache.block_hash(&req.num()) }; match cached { - Some(hdr) => sender.complete(hdr), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::Header(sender)), + Some(hash) => sender.complete(hash), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), } receiver } /// Request a canonical block's chain score. /// Returns the chain score. - pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver { + pub fn chain_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); @@ -112,71 +173,33 @@ impl OnDemand { match cached { Some(score) => sender.complete(score), - None => self.dispatch_header_by_number(ctx, req, ChtProofSender::ChainScore(sender)), + None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), } receiver } - /// Request a canonical block's chain score. - /// Returns the header and chain score. - pub fn header_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber) -> Receiver<(encoded::Header, U256)> { + /// Request a canonical block's hash and chain score by number. + /// Returns the hash and chain score. 
+ pub fn hash_and_score_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> Receiver<(H256, U256)> { let (sender, receiver) = oneshot::channel(); let cached = { let mut cache = self.cache.lock(); let hash = cache.block_hash(&req.num()); ( - hash.clone().and_then(|hash| cache.block_header(&hash)), + hash.clone(), hash.and_then(|hash| cache.chain_score(&hash)), ) }; match cached { - (Some(hdr), Some(score)) => sender.complete((hdr, score)), - _ => self.dispatch_header_by_number(ctx, req, ChtProofSender::Both(sender)), + (Some(hash), Some(score)) => sender.complete((hash, score)), + _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), } receiver } - // dispatch the request, completing the request if no peers available. - fn dispatch_header_by_number(&self, ctx: &BasicContext, req: request::HeaderByNumber, sender: ChtProofSender) { - let num = req.num(); - let cht_num = req.cht_num(); - - let les_req = LesRequest::HeaderProofs(les_request::HeaderProofs { - requests: vec![les_request::HeaderProof { - cht_number: cht_num, - block_number: num, - from_level: 0, - }], - }); - - let pending = Pending::HeaderByNumber(req, sender); - - // we're looking for a peer with serveHeaders who's far enough along in the - // chain. - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_headers && peer.status.head_num >= num { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - }, - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a header by hash. This is less accurate than by-number because we don't know /// where in the chain this header lies, and therefore can't find a peer who is supposed to have /// it as easily. @@ -184,50 +207,11 @@ impl OnDemand { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { Some(hdr) => sender.complete(hdr), - None => self.dispatch_header_by_hash(ctx, req, sender), + None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } receiver } - fn dispatch_header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash, sender: Sender) { - let les_req = LesRequest::Headers(les_request::Headers { - start: req.0.into(), - max: 1, - skip: 0, - reverse: false, - }); - - // all we've got is a hash, so we'll just guess at peers who might have - // it randomly. - let mut potential_peers = self.peers.read().iter() - .filter(|&(_, peer)| peer.capabilities.serve_headers) - .map(|(id, _)| *id) - .collect::>(); - - let mut rng = ::rand::thread_rng(); - ::rand::Rng::shuffle(&mut rng, &mut potential_peers); - - let pending = Pending::HeaderByHash(req, sender); - - for id in potential_peers { - match ctx.request_from(id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request a block, given its header. 
Block bodies are requestable by hash only, /// and the header is required anyway to verify and complete the block body /// -- this just doesn't obscure the network query. @@ -251,41 +235,12 @@ impl OnDemand { sender.complete(encoded::Block::new(stream.out())); } - None => self.dispatch_block(ctx, req, sender), + None => self.dispatch(ctx, Pending::Block(req, sender)), } } receiver } - fn dispatch_block(&self, ctx: &BasicContext, req: request::Body, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::Bodies(les_request::Bodies { - block_hashes: vec![req.hash], - }); - let pending = Pending::Block(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request the receipts for a block. The header serves two purposes: /// provide the block hash to fetch receipts for, and for verification of the receipts root. pub fn block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts) -> Receiver> { @@ -297,84 +252,21 @@ impl OnDemand { } else { match self.cache.lock().block_receipts(&req.0.hash()) { Some(receipts) => sender.complete(receipts), - None => self.dispatch_block_receipts(ctx, req, sender), + None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } } receiver } - fn dispatch_block_receipts(&self, ctx: &BasicContext, req: request::BlockReceipts, sender: Sender>) { - let num = req.0.number(); - let les_req = LesRequest::Receipts(les_request::Receipts { - block_hashes: vec![req.0.hash()], - }); - let pending = Pending::BlockReceipts(req, sender); - - // we're looking for a peer with serveChainSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request an account by address and block header -- which gives a hash to query and a state root /// to verify against. 
- pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver { + pub fn account(&self, ctx: &BasicContext, req: request::Account) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_account(ctx, req, sender); + self.dispatch(ctx, Pending::Account(req, sender)); receiver } - fn dispatch_account(&self, ctx: &BasicContext, req: request::Account, sender: Sender) { - let num = req.header.number(); - let les_req = LesRequest::StateProofs(les_request::StateProofs { - requests: vec![les_request::StateProof { - block: req.header.hash(), - key1: ::util::Hashable::sha3(&req.address), - key2: None, - from_level: 0, - }], - }); - let pending = Pending::Account(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending, - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request code by address, known code hash, and block header. pub fn code(&self, ctx: &BasicContext, req: request::Code) -> Receiver { let (sender, receiver) = oneshot::channel(); @@ -383,88 +275,50 @@ impl OnDemand { if req.code_hash == ::util::sha3::SHA3_EMPTY { sender.complete(Vec::new()) } else { - self.dispatch_code(ctx, req, sender); + self.dispatch(ctx, Pending::Code(req, sender)); } receiver } - fn dispatch_code(&self, ctx: &BasicContext, req: request::Code, sender: Sender) { - let num = req.block_id.1; - let les_req = LesRequest::Codes(les_request::ContractCodes { - code_requests: vec![les_request::ContractCode { - block_hash: req.block_id.0, - account_key: ::util::Hashable::sha3(&req.address), - }] - }); - let pending = Pending::Code(req, sender); - - // we're looking for a peer with serveStateSince(num) - for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), - } - } - } - - trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) - } - /// Request proof-of-execution for a transaction. 
pub fn transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof) -> Receiver> { let (sender, receiver) = oneshot::channel(); - self.dispatch_transaction_proof(ctx, req, sender); + self.dispatch(ctx, Pending::TxProof(req, sender)); receiver } - fn dispatch_transaction_proof(&self, ctx: &BasicContext, req: request::TransactionProof, sender: Sender>) { - let num = req.header.number(); - let les_req = LesRequest::TransactionProof(les_request::TransactionProof { - at: req.header.hash(), - from: req.tx.sender(), - gas: req.tx.gas, - gas_price: req.tx.gas_price, - action: req.tx.action.clone(), - value: req.tx.value, - data: req.tx.data.clone(), - }); - let pending = Pending::TxProof(req, sender); + // dispatch the request, with a "suitability" function to filter acceptable peers. + fn dispatch(&self, ctx: &BasicContext, pending: Pending) { + let mut builder = basic_request::RequestBuilder::default(); + builder.push(pending.make_request()) + .expect("make_request always returns fully complete request; qed"); + + let complete = builder.build(); - // we're looking for a peer with serveStateSince(num) for (id, peer) in self.peers.read().iter() { - if peer.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= num) { - match ctx.request_from(*id, les_req.clone()) { - Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); - self.pending_requests.write().insert( - req_id, - pending - ); - return - } - Err(e) => - trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), + if !peer.can_handle(&pending) { continue } + match ctx.request_from(*id, complete.clone()) { + Ok(req_id) => { + trace!(target: "on_demand", "Assigning request to peer {}", id); + self.pending_requests.write().insert( + req_id, + pending, + ); + return } + Err(e) => + trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } } trace!(target: "on_demand", "No suitable peer for request"); - self.orphaned_requests.write().push(pending) + self.orphaned_requests.write().push(pending); } + // dispatch orphaned requests, and discard those for which the corresponding // receiver has been dropped. 
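A stripped-down sketch of this single dispatch path, using hypothetical types rather than the light-client API: each pending request kind knows which peer capabilities it needs (mirroring Peer::can_handle, including the same serve_state_since comparison), one loop picks a suitable peer, and anything unserved lands in an orphaned queue for a later retry.

use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Capabilities {
    serve_headers: bool,
    serve_state_since: Option<u64>,
}

enum Pending {
    HeaderByHash,
    Account { block_number: u64 },
}

impl Pending {
    // same shape (and the same comparison) as `Peer::can_handle` above
    fn can_be_served_by(&self, caps: &Capabilities) -> bool {
        match *self {
            Pending::HeaderByHash => caps.serve_headers,
            Pending::Account { block_number } =>
                caps.serve_state_since.map_or(false, |since| since >= block_number),
        }
    }
}

fn dispatch(peers: &HashMap<u64, Capabilities>, pending: Pending, orphaned: &mut Vec<Pending>) {
    for (id, caps) in peers {
        if pending.can_be_served_by(caps) {
            println!("assigning request to peer {}", id);
            return;
        }
    }
    // no suitable peer right now; keep the request and retry when the peer set changes
    orphaned.push(pending);
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1u64, Capabilities { serve_headers: true, serve_state_since: None });

    let mut orphaned = Vec::new();
    dispatch(&peers, Pending::HeaderByHash, &mut orphaned);                // served by peer 1
    dispatch(&peers, Pending::Account { block_number: 5 }, &mut orphaned); // no state served: orphaned
    assert_eq!(orphaned.len(), 1);
}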
fn dispatch_orphaned(&self, ctx: &BasicContext) { @@ -494,30 +348,22 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); - for orphaned in to_dispatch { - match orphaned { - Pending::HeaderByNumber(req, mut sender) => { - let hangup = match sender { + for mut orphaned in to_dispatch { + let hung_up = match orphaned { + Pending::HeaderProof(_, ref mut sender) => match *sender { ChtProofSender::Both(ref mut s) => check_hangup(s), - ChtProofSender::Header(ref mut s) => check_hangup(s), + ChtProofSender::Hash(ref mut s) => check_hangup(s), ChtProofSender::ChainScore(ref mut s) => check_hangup(s), - }; + }, + Pending::HeaderByHash(_, ref mut sender) => check_hangup(sender), + Pending::Block(_, ref mut sender) => check_hangup(sender), + Pending::BlockReceipts(_, ref mut sender) => check_hangup(sender), + Pending::Account(_, ref mut sender) => check_hangup(sender), + Pending::Code(_, ref mut sender) => check_hangup(sender), + Pending::TxProof(_, ref mut sender) => check_hangup(sender), + }; - if !hangup { self.dispatch_header_by_number(ctx, req, sender) } - } - Pending::HeaderByHash(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_header_by_hash(ctx, req, sender) }, - Pending::Block(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block(ctx, req, sender) }, - Pending::BlockReceipts(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_block_receipts(ctx, req, sender) }, - Pending::Account(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_account(ctx, req, sender) }, - Pending::Code(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_code(ctx, req, sender) }, - Pending::TxProof(req, mut sender) => - if !check_hangup(&mut sender) { self.dispatch_transaction_proof(ctx, req, sender) } - } + if !hung_up { self.dispatch(ctx, orphaned) } } } } @@ -555,218 +401,126 @@ impl Handler for OnDemand { self.dispatch_orphaned(ctx.as_basic()); } - fn on_header_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[(Bytes, Vec)]) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) { let peer = ctx.peer(); let req = match self.pending_requests.write().remove(&req_id) { Some(req) => req, None => return, }; + let response = match responses.get(0) { + Some(response) => response, + None => { + trace!(target: "on_demand", "Ignoring empty response for request {}", req_id); + self.dispatch(ctx.as_basic(), req); + return; + } + }; + + // handle the response appropriately for the request. + // all branches which do not return early lead to disabling of the peer + // due to misbehavior. 
match req { - Pending::HeaderByNumber(req, sender) => { - if let Some(&(ref header, ref proof)) = proofs.get(0) { - match req.check_response(header, proof) { - Ok((header, score)) => { + Pending::HeaderProof(req, sender) => { + if let NetworkResponse::HeaderProof(ref response) = *response { + match req.check_response(&response.proof) { + Ok((hash, score)) => { let mut cache = self.cache.lock(); - let hash = header.hash(); - cache.insert_block_header(hash, header.clone()); - cache.insert_block_hash(header.number(), hash); + cache.insert_block_hash(req.num(), hash); cache.insert_chain_score(hash, score); match sender { - ChtProofSender::Both(sender) => sender.complete((header, score)), - ChtProofSender::Header(sender) => sender.complete(header), + ChtProofSender::Both(sender) => sender.complete((hash, score)), + ChtProofSender::Hash(sender) => sender.complete(hash), ChtProofSender::ChainScore(sender) => sender.complete(score), } - return } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for header request: {:?}", e), } } - - self.dispatch_header_by_number(ctx.as_basic(), req, sender); } - _ => panic!("Only header by number request fetches header proofs; qed"), - } - } - - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::HeaderByHash(req, sender) => { - if let Some(ref header) = headers.get(0) { - match req.check_response(header) { - Ok(header) => { - self.cache.lock().insert_block_header(req.0, header.clone()); - sender.complete(header); - return - } - Err(e) => { - warn!("Error handling response for header request: {:?}", e); - ctx.disable_peer(peer); + if let NetworkResponse::Headers(ref response) = *response { + if let Some(header) = response.headers.get(0) { + match req.check_response(header) { + Ok(header) => { + self.cache.lock().insert_block_header(req.0, header.clone()); + sender.complete(header); + return + } + Err(e) => warn!("Error handling response for header request: {:?}", e), } } } - - self.dispatch_header_by_hash(ctx.as_basic(), req, sender); } - _ => panic!("Only header by hash request fetches headers; qed"), - } - } - - fn on_block_bodies(&self, ctx: &EventContext, req_id: ReqId, bodies: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Block(req, sender) => { - if let Some(ref body) = bodies.get(0) { - match req.check_response(body) { + if let NetworkResponse::Body(ref response) = *response { + match req.check_response(&response.body) { Ok(block) => { - let body = encoded::Body::new(body.to_vec()); - self.cache.lock().insert_block_body(req.hash, body); + self.cache.lock().insert_block_body(req.hash, response.body.clone()); sender.complete(block); return } - Err(e) => { - warn!("Error handling response for block request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for block request: {:?}", e), } } - - self.dispatch_block(ctx.as_basic(), req, sender); } - _ => panic!("Only block request fetches bodies; qed"), - } - } - - fn on_receipts(&self, ctx: &EventContext, req_id: ReqId, receipts: &[Vec]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - 
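A hypothetical miniature of the unified response handling shown here: match the expected variant, return early only on a verified value, and let every other branch fall through to penalising the peer.

#[derive(Debug)]
enum Response { Headers(Vec<String>), Receipts(u32) }

enum Expecting { Headers, Receipts }

fn handle(expected: Expecting, response: Response) -> Result<String, &'static str> {
    match expected {
        Expecting::Headers => {
            if let Response::Headers(ref headers) = response {
                if let Some(first) = headers.first() {
                    // a real implementation would verify the header against the request here
                    return Ok(format!("got header {}", first));
                }
            }
        }
        Expecting::Receipts => {
            if let Response::Receipts(count) = response {
                return Ok(format!("got {} receipts", count));
            }
        }
    }

    // wrong variant, empty data, or failed verification: treat the peer as misbehaving
    Err("disable peer")
}

fn main() {
    assert!(handle(Expecting::Headers, Response::Headers(vec!["h1".into()])).is_ok());
    assert!(handle(Expecting::Headers, Response::Receipts(3)).is_err());
}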
}; - - match req { Pending::BlockReceipts(req, sender) => { - if let Some(ref receipts) = receipts.get(0) { - match req.check_response(receipts) { + if let NetworkResponse::Receipts(ref response) = *response { + match req.check_response(&response.receipts) { Ok(receipts) => { let hash = req.0.hash(); self.cache.lock().insert_block_receipts(hash, receipts.clone()); sender.complete(receipts); return } - Err(e) => { - warn!("Error handling response for receipts request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for receipts request: {:?}", e), } } - - self.dispatch_block_receipts(ctx.as_basic(), req, sender); } - _ => panic!("Only receipts request fetches receipts; qed"), - } - } - - fn on_state_proofs(&self, ctx: &EventContext, req_id: ReqId, proofs: &[Vec]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Account(req, sender) => { - if let Some(ref proof) = proofs.get(0) { - match req.check_response(proof) { - Ok(proof) => { - sender.complete(proof); + if let NetworkResponse::Account(ref response) = *response { + match req.check_response(&response.proof) { + Ok(maybe_account) => { + // TODO: validate against request outputs. + // needs engine + env info as part of request. + sender.complete(maybe_account); return } - Err(e) => { - warn!("Error handling response for state request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for state request: {:?}", e), } } - - self.dispatch_account(ctx.as_basic(), req, sender); } - _ => panic!("Only account request fetches state proof; qed"), - } - } - - fn on_code(&self, ctx: &EventContext, req_id: ReqId, codes: &[Bytes]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::Code(req, sender) => { - if let Some(code) = codes.get(0) { - match req.check_response(code.as_slice()) { + if let NetworkResponse::Code(ref response) = *response { + match req.check_response(response.code.as_slice()) { Ok(()) => { - sender.complete(code.clone()); + sender.complete(response.code.clone()); return } - Err(e) => { - warn!("Error handling response for code request: {:?}", e); - ctx.disable_peer(peer); - } + Err(e) => warn!("Error handling response for code request: {:?}", e), } - - self.dispatch_code(ctx.as_basic(), req, sender); } } - _ => panic!("Only code request fetches code; qed"), - } - } - - fn on_transaction_proof(&self, ctx: &EventContext, req_id: ReqId, items: &[DBValue]) { - let peer = ctx.peer(); - let req = match self.pending_requests.write().remove(&req_id) { - Some(req) => req, - None => return, - }; - - match req { Pending::TxProof(req, sender) => { - match req.check_response(items) { - ProvedExecution::Complete(executed) => { - sender.complete(Ok(executed)); - return - } - ProvedExecution::Failed(err) => { - sender.complete(Err(err)); - return - } - ProvedExecution::BadProof => { - warn!("Error handling response for transaction proof request"); - ctx.disable_peer(peer); + if let NetworkResponse::Execution(ref response) = *response { + match req.check_response(&response.items) { + ProvedExecution::Complete(executed) => { + sender.complete(Ok(executed)); + return + } + ProvedExecution::Failed(err) => { + sender.complete(Err(err)); + return + } + ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), } } - - 
self.dispatch_transaction_proof(ctx.as_basic(), req, sender); } - _ => panic!("Only transaction proof request dispatches transaction proof requests; qed"), } + + ctx.disable_peer(peer); } fn tick(&self, ctx: &BasicContext) { diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 3a72db51d..4f028a71c 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -61,9 +61,9 @@ impl From> for Error { } } -/// Request for a header by number. +/// Request for header proof by number #[derive(Debug, Clone, PartialEq, Eq)] -pub struct HeaderByNumber { +pub struct HeaderProof { /// The header's number. num: u64, /// The cht number for the given block number. @@ -72,11 +72,11 @@ pub struct HeaderByNumber { cht_root: H256, } -impl HeaderByNumber { +impl HeaderProof { /// Construct a new header-by-number request. Fails if the given number is 0. /// Provide the expected CHT root to compare against. pub fn new(num: u64, cht_root: H256) -> Option { - ::cht::block_to_cht_number(num).map(|cht_num| HeaderByNumber { + ::cht::block_to_cht_number(num).map(|cht_num| HeaderProof { num: num, cht_num: cht_num, cht_root: cht_root, @@ -92,18 +92,11 @@ impl HeaderByNumber { /// Access the expected CHT root. pub fn cht_root(&self) -> H256 { self.cht_root } - /// Check a response with a header and cht proof. - pub fn check_response(&self, header: &[u8], proof: &[Bytes]) -> Result<(encoded::Header, U256), Error> { - let (expected_hash, td) = match ::cht::check_proof(proof, self.num, self.cht_root) { - Some((expected_hash, td)) => (expected_hash, td), - None => return Err(Error::BadProof), - }; - - // and compare the hash to the found header. - let found_hash = header.sha3(); - match expected_hash == found_hash { - true => Ok((encoded::Header::new(header.to_vec()), td)), - false => Err(Error::WrongHash(expected_hash, found_hash)), + /// Check a response with a CHT proof, get a hash and total difficulty back. + pub fn check_response(&self, proof: &[Bytes]) -> Result<(H256, U256), Error> { + match ::cht::check_proof(proof, self.num, self.cht_root) { + Some((expected_hash, td)) => Ok((expected_hash, td)), + None => Err(Error::BadProof), } } } @@ -114,10 +107,10 @@ pub struct HeaderByHash(pub H256); impl HeaderByHash { /// Check a response for the header. - pub fn check_response(&self, header: &[u8]) -> Result { + pub fn check_response(&self, header: &encoded::Header) -> Result { let hash = header.sha3(); match hash == self.0 { - true => Ok(encoded::Header::new(header.to_vec())), + true => Ok(header.clone()), false => Err(Error::WrongHash(self.0, hash)), } } @@ -143,16 +136,14 @@ impl Body { } /// Check a response for this block body. 
- pub fn check_response(&self, body: &[u8]) -> Result { - let body_view = UntrustedRlp::new(&body); - + pub fn check_response(&self, body: &encoded::Body) -> Result { // check the integrity of the the body against the header - let tx_root = ::util::triehash::ordered_trie_root(body_view.at(0)?.iter().map(|r| r.as_raw().to_vec())); + let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); if tx_root != self.header.transactions_root() { return Err(Error::WrongTrieRoot(self.header.transactions_root(), tx_root)); } - let uncles_hash = body_view.at(1)?.as_raw().sha3(); + let uncles_hash = body.rlp().at(1).as_raw().sha3(); if uncles_hash != self.header.uncles_hash() { return Err(Error::WrongHash(self.header.uncles_hash(), uncles_hash)); } @@ -160,7 +151,7 @@ impl Body { // concatenate the header and the body. let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(body, 2); + stream.append_raw(&body.rlp().as_raw(), 2); Ok(encoded::Block::new(stream.out())) } @@ -194,7 +185,7 @@ pub struct Account { impl Account { /// Check a response with an account against the stored header. - pub fn check_response(&self, proof: &[Bytes]) -> Result { + pub fn check_response(&self, proof: &[Bytes]) -> Result, Error> { let state_root = self.header.state_root(); let mut db = MemoryDB::new(); @@ -203,14 +194,14 @@ impl Account { match TrieDB::new(&db, &state_root).and_then(|t| t.get(&self.address.sha3()))? { Some(val) => { let rlp = UntrustedRlp::new(&val); - Ok(BasicAccount { + Ok(Some(BasicAccount { nonce: rlp.val_at(0)?, balance: rlp.val_at(1)?, storage_root: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, - }) + })) }, - None => Err(Error::BadProof) + None => Ok(None), } } } @@ -219,8 +210,6 @@ impl Account { pub struct Code { /// Block hash, number pair. pub block_id: (H256, u64), - /// Address requested. - pub address: Address, /// Account's code hash. pub code_hash: H256, } @@ -278,11 +267,11 @@ mod tests { #[test] fn no_invalid_header_by_number() { - assert!(HeaderByNumber::new(0, Default::default()).is_none()) + assert!(HeaderProof::new(0, Default::default()).is_none()) } #[test] - fn check_header_by_number() { + fn check_header_proof() { use ::cht; let test_client = TestBlockChainClient::new(); @@ -303,11 +292,11 @@ mod tests { }; let proof = cht.prove(10_000, 0).unwrap().unwrap(); - let req = HeaderByNumber::new(10_000, cht.root()).unwrap(); + let req = HeaderProof::new(10_000, cht.root()).unwrap(); let raw_header = test_client.block_header(::ethcore::ids::BlockId::Number(10_000)).unwrap(); - assert!(req.check_response(&raw_header.into_inner(), &proof[..]).is_ok()); + assert!(req.check_response(&proof[..]).is_ok()); } #[test] @@ -334,7 +323,8 @@ mod tests { hash: header.hash(), }; - assert!(req.check_response(&*body_stream.drain()).is_ok()) + let response = encoded::Body::new(body_stream.drain()); + assert!(req.check_response(&response).is_ok()) } #[test] @@ -412,7 +402,6 @@ mod tests { let code = vec![1u8; 256]; let req = Code { block_id: (Default::default(), 2), - address: Default::default(), code_hash: ::util::Hashable::sha3(&code), }; diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 1ebe1c75b..a3880da44 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -202,6 +202,8 @@ impl Encodable for HashOrNumber { } /// All request types, as they're sent over the network. 
+/// They may be incomplete, with back-references to outputs +/// of prior requests. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Request { /// A request for block headers. @@ -223,7 +225,7 @@ pub enum Request { Execution(IncompleteExecutionRequest), } -/// All request types, as they're sent over the network. +/// All request types, in an answerable state. #[derive(Debug, Clone, PartialEq, Eq)] pub enum CompleteRequest { /// A request for block headers. From 04f106aad8418a4a769a8e90fcbe99998cf8c543 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:29:06 +0100 Subject: [PATCH 37/89] migrate oneshot::complete to send in on_demand --- ethcore/light/src/on_demand/mod.rs | 42 +++++++++++++++--------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index df8a6c6a9..1428efa50 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -31,7 +31,7 @@ use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver}; use network::PeerId; use rlp::{RlpStream, Stream}; -use util::{Bytes, DBValue, RwLock, Mutex, U256, H256}; +use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; @@ -52,7 +52,7 @@ impl Peer { match *pending { Pending::HeaderProof(ref req, _) => self.capabilities.serve_headers && self.status.head_num > req.num(), - Pending::HeaderByHash(ref req, _) => self.capabilities.serve_headers, + Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, Pending::Block(ref req, _) => self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), Pending::BlockReceipts(ref req, _) => @@ -156,7 +156,7 @@ impl OnDemand { }; match cached { - Some(hash) => sender.complete(hash), + Some(hash) => sender.send(hash).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Hash(sender))), } receiver @@ -172,7 +172,7 @@ impl OnDemand { }; match cached { - Some(score) => sender.complete(score), + Some(score) => sender.send(score).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::ChainScore(sender))), } @@ -193,7 +193,7 @@ impl OnDemand { }; match cached { - (Some(hash), Some(score)) => sender.complete((hash, score)), + (Some(hash), Some(score)) => sender.send((hash, score)).expect("receiver alive here; qed"), _ => self.dispatch(ctx, Pending::HeaderProof(req, ChtProofSender::Both(sender))), } @@ -206,7 +206,7 @@ impl OnDemand { pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); match self.cache.lock().block_header(&req.0) { - Some(hdr) => sender.complete(hdr), + Some(hdr) => sender.send(hdr).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } receiver @@ -225,7 +225,7 @@ impl OnDemand { stream.begin_list(0); stream.begin_list(0); - sender.complete(encoded::Block::new(stream.out())) + sender.send(encoded::Block::new(stream.out())).expect("receiver alive here; qed"); } else { match self.cache.lock().block_body(&req.hash) { Some(body) => { @@ -233,7 +233,7 @@ impl OnDemand { stream.append_raw(&req.header.into_inner(), 1); stream.append_raw(&body.into_inner(), 2); - sender.complete(encoded::Block::new(stream.out())); + 
sender.send(encoded::Block::new(stream.out())).expect("receiver alive here; qed"); } None => self.dispatch(ctx, Pending::Block(req, sender)), } @@ -248,10 +248,10 @@ impl OnDemand { // fast path for empty receipts. if req.0.receipts_root() == SHA3_NULL_RLP { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect("receiver alive here; qed"); } else { match self.cache.lock().block_receipts(&req.0.hash()) { - Some(receipts) => sender.complete(receipts), + Some(receipts) => sender.send(receipts).expect("receiver alive here; qed"), None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } } @@ -273,7 +273,7 @@ impl OnDemand { // fast path for no code. if req.code_hash == ::util::sha3::SHA3_EMPTY { - sender.complete(Vec::new()) + sender.send(Vec::new()).expect("receiver alive here; qed") } else { self.dispatch(ctx, Pending::Code(req, sender)); } @@ -430,9 +430,9 @@ impl Handler for OnDemand { cache.insert_chain_score(hash, score); match sender { - ChtProofSender::Both(sender) => sender.complete((hash, score)), - ChtProofSender::Hash(sender) => sender.complete(hash), - ChtProofSender::ChainScore(sender) => sender.complete(score), + ChtProofSender::Both(sender) => { let _ = sender.send((hash, score)); } + ChtProofSender::Hash(sender) => { let _ = sender.send(hash); } + ChtProofSender::ChainScore(sender) => { let _ = sender.send(score); } } return } @@ -446,7 +446,7 @@ impl Handler for OnDemand { match req.check_response(header) { Ok(header) => { self.cache.lock().insert_block_header(req.0, header.clone()); - sender.complete(header); + let _ = sender.send(header); return } Err(e) => warn!("Error handling response for header request: {:?}", e), @@ -459,7 +459,7 @@ impl Handler for OnDemand { match req.check_response(&response.body) { Ok(block) => { self.cache.lock().insert_block_body(req.hash, response.body.clone()); - sender.complete(block); + let _ = sender.send(block); return } Err(e) => warn!("Error handling response for block request: {:?}", e), @@ -472,7 +472,7 @@ impl Handler for OnDemand { Ok(receipts) => { let hash = req.0.hash(); self.cache.lock().insert_block_receipts(hash, receipts.clone()); - sender.complete(receipts); + let _ = sender.send(receipts); return } Err(e) => warn!("Error handling response for receipts request: {:?}", e), @@ -485,7 +485,7 @@ impl Handler for OnDemand { Ok(maybe_account) => { // TODO: validate against request outputs. // needs engine + env info as part of request. 
- sender.complete(maybe_account); + let _ = sender.send(maybe_account); return } Err(e) => warn!("Error handling response for state request: {:?}", e), @@ -496,7 +496,7 @@ impl Handler for OnDemand { if let NetworkResponse::Code(ref response) = *response { match req.check_response(response.code.as_slice()) { Ok(()) => { - sender.complete(response.code.clone()); + let _ = sender.send(response.code.clone()); return } Err(e) => warn!("Error handling response for code request: {:?}", e), @@ -507,11 +507,11 @@ impl Handler for OnDemand { if let NetworkResponse::Execution(ref response) = *response { match req.check_response(&response.items) { ProvedExecution::Complete(executed) => { - sender.complete(Ok(executed)); + let _ = sender.send(Ok(executed)); return } ProvedExecution::Failed(err) => { - sender.complete(Err(err)); + let _ = sender.send(Err(err)); return } ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), From b5527415d6bf10c44ba14e307d812219fdd9b1f1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 20:33:45 +0100 Subject: [PATCH 38/89] get on_demand tests to compile --- ethcore/light/src/on_demand/mod.rs | 5 ++--- ethcore/light/src/on_demand/request.rs | 8 +++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 1428efa50..aceba66e2 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -536,7 +536,7 @@ mod tests { use cache::Cache; use net::{Announcement, BasicContext, ReqId, Error as LesError}; - use request::{Request as LesRequest, Kind as LesRequestKind}; + use request::Requests; use network::{PeerId, NodeId}; use time::Duration; @@ -546,11 +546,10 @@ mod tests { impl BasicContext for FakeContext { fn persistent_peer_id(&self, _: PeerId) -> Option { None } - fn request_from(&self, _: PeerId, _: LesRequest) -> Result { + fn request_from(&self, _: PeerId, _: Requests) -> Result { unimplemented!() } fn make_announcement(&self, _: Announcement) { } - fn max_requests(&self, _: PeerId, _: LesRequestKind) -> usize { 0 } fn disconnect_peer(&self, _: PeerId) { } fn disable_peer(&self, _: PeerId) { } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 4f028a71c..825ca2be2 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -294,8 +294,6 @@ mod tests { let proof = cht.prove(10_000, 0).unwrap().unwrap(); let req = HeaderProof::new(10_000, cht.root()).unwrap(); - let raw_header = test_client.block_header(::ethcore::ids::BlockId::Number(10_000)).unwrap(); - assert!(req.check_response(&proof[..]).is_ok()); } @@ -305,9 +303,9 @@ mod tests { header.set_number(10_000); header.set_extra_data(b"test_header".to_vec()); let hash = header.hash(); - let raw_header = ::rlp::encode(&header); + let raw_header = encoded::Header::new(::rlp::encode(&header).to_vec()); - assert!(HeaderByHash(hash).check_response(&*raw_header).is_ok()) + assert!(HeaderByHash(hash).check_response(&raw_header).is_ok()) } #[test] @@ -323,7 +321,7 @@ mod tests { hash: header.hash(), }; - let response = encoded::Body::new(body_stream.drain()); + let response = encoded::Body::new(body_stream.drain().to_vec()); assert!(req.check_response(&response).is_ok()) } From fa42b6acecab326f773ea3ec7a4d342b02d0dd18 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 16 Mar 2017 23:51:47 +0100 Subject: [PATCH 39/89] port ethsync to PIP messages --- 
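A note on the oneshot migration carried out in the two patches above: futures' `oneshot::Sender::complete` is replaced by `send`, which returns `Result<(), T>` and hands the value back when the receiver has already been dropped. The handlers therefore either `expect` success where the receiver is known to be alive, or discard the error with `let _ =`. Below is a minimal standalone sketch of that behaviour, assuming the futures 0.1 `futures::sync::oneshot` API used by this series; it is illustrative only and not part of any patch.

    extern crate futures; // futures = "0.1"

    use futures::Future;
    use futures::sync::oneshot;

    fn main() {
        // Receiver alive: send succeeds and the value arrives.
        let (tx, rx) = oneshot::channel::<u32>();
        tx.send(5).expect("receiver alive here; qed");
        assert_eq!(rx.wait().unwrap(), 5);

        // Receiver dropped: send reports the failure by returning Err(5),
        // so callers that do not care can simply write `let _ = tx.send(..)`.
        let (tx, rx) = oneshot::channel::<u32>();
        drop(rx);
        assert_eq!(tx.send(5), Err(5));
    }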
sync/src/light_sync/mod.rs | 65 ++++++++++++++++++--------- sync/src/light_sync/response.rs | 34 +++++++------- sync/src/light_sync/sync_round.rs | 27 +++++------ sync/src/light_sync/tests/test_net.rs | 6 ++- 4 files changed, 79 insertions(+), 53 deletions(-) diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index fba89dd7b..4590103e7 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -16,7 +16,7 @@ //! Light client synchronization. //! -//! This will synchronize the header chain using LES messages. +//! This will synchronize the header chain using PIP messages. //! Dataflow is largely one-directional as headers are pushed into //! the light client queue for import. Where possible, they are batched //! in groups. @@ -36,14 +36,15 @@ use std::collections::HashMap; use std::mem; use std::sync::Arc; +use ethcore::encoded; use light::client::{AsLightClient, LightChainClient}; use light::net::{ Announcement, Handler, BasicContext, EventContext, - Capabilities, ReqId, Status, + Capabilities, ReqId, Status, Error as NetError, }; -use light::request; +use light::request::{self, CompleteHeadersRequest as HeadersRequest}; use network::PeerId; -use util::{Bytes, U256, H256, Mutex, RwLock}; +use util::{U256, H256, Mutex, RwLock}; use rand::{Rng, OsRng}; use self::sync_round::{AbortReason, SyncRound, ResponseContext}; @@ -91,7 +92,7 @@ impl Peer { #[derive(Debug)] enum AncestorSearch { Queued(u64), // queued to search for blocks starting from here. - Awaiting(ReqId, u64, request::Headers), // awaiting response for this request. + Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request. Prehistoric, // prehistoric block found. TODO: start to roll back CHTs. FoundCommon(u64, H256), // common block found. Genesis, // common ancestor is the genesis. 
@@ -113,7 +114,7 @@ impl AncestorSearch { match self { AncestorSearch::Awaiting(id, start, req) => { if &id == ctx.req_id() { - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { for header in &headers { if client.is_known(&header.hash()) { @@ -150,17 +151,17 @@ impl AncestorSearch { } fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch - where F: FnMut(request::Headers) -> Option + where F: FnMut(HeadersRequest) -> Option { - const BATCH_SIZE: usize = 64; + const BATCH_SIZE: u64 = 64; match self { AncestorSearch::Queued(start) => { - let batch_size = ::std::cmp::min(start as usize, BATCH_SIZE); + let batch_size = ::std::cmp::min(start, BATCH_SIZE); trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor", batch_size, start); - let req = request::Headers { + let req = HeadersRequest { start: start.into(), max: batch_size, skip: 0, @@ -193,13 +194,13 @@ struct ResponseCtx<'a> { peer: PeerId, req_id: ReqId, ctx: &'a BasicContext, - data: &'a [Bytes], + data: &'a [encoded::Header], } impl<'a> ResponseContext for ResponseCtx<'a> { fn responder(&self) -> PeerId { self.peer } fn req_id(&self) -> &ReqId { &self.req_id } - fn data(&self) -> &[Bytes] { self.data } + fn data(&self) -> &[encoded::Header] { self.data } fn punish_responder(&self) { self.ctx.disable_peer(self.peer) } } @@ -313,11 +314,22 @@ impl Handler for LightSync { self.maintain_sync(ctx.as_basic()); } - fn on_block_headers(&self, ctx: &EventContext, req_id: ReqId, headers: &[Bytes]) { - if !self.peers.read().contains_key(&ctx.peer()) { + fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[request::Response]) { + let peer = ctx.peer(); + if !self.peers.read().contains_key(&peer) { return } + let headers = match responses.get(0) { + Some(&request::Response::Headers(ref response)) => &response.headers[..], + Some(_) => { + trace!("Disabling peer {} for wrong response type.", peer); + ctx.disable_peer(peer); + &[] + } + None => &[], + }; + { let mut state = self.state.lock(); @@ -465,18 +477,27 @@ impl LightSync { // naive request dispatcher: just give to any peer which says it will // give us responses. 
- let dispatcher = move |req: request::Headers| { + let dispatcher = move |req: HeadersRequest| { rng.shuffle(&mut peer_ids); + let request = { + let mut builder = request::RequestBuilder::default(); + builder.push(request::Request::Headers(request::IncompleteHeadersRequest { + start: req.start.into(), + skip: req.skip, + max: req.max, + reverse: req.reverse, + })).expect("request provided fully complete with no unresolved back-references; qed"); + builder.build() + }; for peer in &peer_ids { - if ctx.max_requests(*peer, request::Kind::Headers) >= req.max { - match ctx.request_from(*peer, request::Request::Headers(req.clone())) { - Ok(id) => { - return Some(id) - } - Err(e) => - trace!(target: "sync", "Error requesting headers from viable peer: {}", e), + match ctx.request_from(*peer, request.clone()) { + Ok(id) => { + return Some(id) } + Err(NetError::NoCredits) => {} + Err(e) => + trace!(target: "sync", "Error requesting headers from viable peer: {}", e), } } diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index cb95824ce..d85d2548d 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -18,10 +18,11 @@ use std::fmt; +use ethcore::encoded; use ethcore::header::Header; -use light::request::{HashOrNumber, Headers as HeadersRequest}; -use rlp::{DecoderError, UntrustedRlp, View}; -use util::{Bytes, H256}; +use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest}; +use rlp::DecoderError; +use util::H256; /// Errors found when decoding headers and verifying with basic constraints. #[derive(Debug, PartialEq)] @@ -71,13 +72,13 @@ pub trait Constraint { fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>; } -/// Decode a response and do basic verification against a request. -pub fn decode_and_verify(headers: &[Bytes], request: &HeadersRequest) -> Result, BasicError> { - let headers: Vec<_> = try!(headers.iter().map(|x| UntrustedRlp::new(&x).as_val()).collect()); +/// Do basic verification of provided headers against a request. 
+pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result, BasicError> { + let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect(); let reverse = request.reverse; - try!(Max(request.max).verify(&headers, reverse)); + try!(Max(request.max as usize).verify(&headers, reverse)); match request.start { HashOrNumber::Number(ref num) => try!(StartsAtNumber(*num).verify(&headers, reverse)), HashOrNumber::Hash(ref hash) => try!(StartsAtHash(*hash).verify(&headers, reverse)), @@ -150,8 +151,9 @@ impl Constraint for Max { #[cfg(test)] mod tests { + use ethcore::encoded; use ethcore::header::Header; - use light::request::Headers as HeadersRequest; + use light::request::CompleteHeadersRequest as HeadersRequest; use super::*; @@ -175,10 +177,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -201,10 +203,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert!(decode_and_verify(&headers, &request).is_ok()); + assert!(verify(&headers, &request).is_ok()); } #[test] @@ -227,10 +229,10 @@ mod tests { parent_hash = Some(header.hash()); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); + assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25))); } #[test] @@ -246,9 +248,9 @@ mod tests { let mut header = Header::default(); header.set_number(x); - ::rlp::encode(&header).to_vec() + encoded::Header::new(::rlp::encode(&header).to_vec()) }).collect(); - assert_eq!(decode_and_verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); + assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2)))); } } diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index 6fa635214..dfa17aad4 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -20,13 +20,14 @@ use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt; +use ethcore::encoded; use ethcore::header::Header; use light::net::ReqId; -use light::request::Headers as HeadersRequest; +use light::request::CompleteHeadersRequest as HeadersRequest; use network::PeerId; -use util::{Bytes, H256}; +use util::H256; use super::response; @@ -40,7 +41,7 @@ pub trait ResponseContext { /// Get the request ID this response corresponds to. fn req_id(&self) -> &ReqId; /// Get the (unverified) response data. - fn data(&self) -> &[Bytes]; + fn data(&self) -> &[encoded::Header]; /// Punish the responder. 
fn punish_responder(&self); } @@ -114,7 +115,7 @@ impl Fetcher { let needed_headers = HeadersRequest { start: high_rung.parent_hash().clone().into(), - max: diff as usize - 1, + max: diff - 1, skip: 0, reverse: true, }; @@ -190,7 +191,7 @@ impl Fetcher { return SyncRound::Fetch(self); } - match response::decode_and_verify(headers, &request.headers_request) { + match response::verify(headers, &request.headers_request) { Err(e) => { trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e); ctx.punish_responder(); @@ -286,21 +287,21 @@ impl Fetcher { } // Compute scaffold parameters from non-zero distance between start and target block: (skip, pivots). -fn scaffold_params(diff: u64) -> (u64, usize) { +fn scaffold_params(diff: u64) -> (u64, u64) { // default parameters. // amount of blocks between each scaffold pivot. const ROUND_SKIP: u64 = 255; // amount of scaffold pivots: these are the Xs in "X___X___X" - const ROUND_PIVOTS: usize = 256; + const ROUND_PIVOTS: u64 = 256; let rem = diff % (ROUND_SKIP + 1); if diff <= ROUND_SKIP { // just request headers from the start to the target. - (0, rem as usize) + (0, rem) } else { // the number of pivots necessary to exactly hit or overshoot the target. let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 }; - let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS as u64) as usize; + let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS); (ROUND_SKIP, num_pivots) } } @@ -319,7 +320,7 @@ pub struct RoundStart { contributors: HashSet, attempt: usize, skip: u64, - pivots: usize, + pivots: u64, } impl RoundStart { @@ -372,7 +373,7 @@ impl RoundStart { } }; - match response::decode_and_verify(ctx.data(), &req) { + match response::verify(ctx.data(), &req) { Ok(headers) => { if self.sparse_headers.len() == 0 && headers.get(0).map_or(false, |x| x.parent_hash() != &self.start_block.1) { @@ -383,7 +384,7 @@ impl RoundStart { self.contributors.insert(ctx.responder()); self.sparse_headers.extend(headers); - if self.sparse_headers.len() == self.pivots { + if self.sparse_headers.len() as u64 == self.pivots { return if self.skip == 0 { SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into()) } else { @@ -429,7 +430,7 @@ impl RoundStart { let start = (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (self.skip + 1); - let max = self.pivots - self.sparse_headers.len(); + let max = self.pivots - self.sparse_headers.len() as u64; let headers_request = HeadersRequest { start: start.into(), diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index d0e472374..898f8766d 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -28,6 +28,7 @@ use io::IoChannel; use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::net::request_credits::FlowParams; +use light::provider::LightProvider; use network::{NodeId, PeerId}; use util::RwLock; @@ -71,7 +72,7 @@ enum PeerData { } // test peer type. -// Either a full peer or a LES peer. +// Either a full peer or a light peer. 
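An aside on `scaffold_params` above: the (skip, pivots) arithmetic is easiest to see with concrete distances. The sketch below reproduces the function as it reads after this patch and checks a few values; the `main` wrapper and the assertions are illustrative only and not part of the codebase.

    // Reproduced from sync_round.rs (post-patch) for illustration.
    fn scaffold_params(diff: u64) -> (u64, u64) {
        // amount of blocks between each scaffold pivot.
        const ROUND_SKIP: u64 = 255;
        // amount of scaffold pivots: these are the Xs in "X___X___X"
        const ROUND_PIVOTS: u64 = 256;

        let rem = diff % (ROUND_SKIP + 1);
        if diff <= ROUND_SKIP {
            // just request headers from the start to the target.
            (0, rem)
        } else {
            // the number of pivots necessary to exactly hit or overshoot the target.
            let pivots_to_target = (diff / (ROUND_SKIP + 1)) + if rem == 0 { 0 } else { 1 };
            let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS);
            (ROUND_SKIP, num_pivots)
        }
    }

    fn main() {
        assert_eq!(scaffold_params(100), (0, 100));       // within one skip: request densely
        assert_eq!(scaffold_params(256), (255, 1));       // exactly one pivot away
        assert_eq!(scaffold_params(1_000), (255, 4));     // 1000 = 3*256 + 232, rounded up to 4 pivots
        assert_eq!(scaffold_params(100_000), (255, 256)); // capped at ROUND_PIVOTS
    }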
pub struct Peer { proto: LightProtocol, queue: RwLock>, @@ -115,7 +116,8 @@ impl Peer { }, }; - let mut proto = LightProtocol::new(chain.clone(), params); + let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default()))); + let mut proto = LightProtocol::new(Arc::new(provider), params); proto.add_handler(sync.clone()); Peer { proto: proto, From 2ee3a7282b279047697aa392901d516fd04bd23f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 17 Mar 2017 00:14:29 +0100 Subject: [PATCH 40/89] adjust to minor on_demand API changes in RPC --- rpc/src/v1/helpers/dispatch.rs | 5 ++++- rpc/src/v1/impls/light/eth.rs | 23 +++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index b11ada048..36d1b330f 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -268,7 +268,10 @@ impl LightDispatcher { })); match nonce_future { - Some(x) => x.map(|acc| acc.nonce).map_err(|_| errors::no_light_peers()).boxed(), + Some(x) => + x.map(|acc| acc.map_or_else(Default::default, |acc| acc.nonce)) + .map_err(|_| errors::no_light_peers()) + .boxed(), None => future::err(errors::network_disabled()).boxed() } } diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index f889faf00..a35d48fb6 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -105,15 +105,22 @@ impl EthClient { match cht_root { None => return future::ok(None).boxed(), Some(root) => { - let req = request::HeaderByNumber::new(n, root) + let req = request::HeaderProof::new(n, root) .expect("only fails for 0; client always stores genesis; client already queried; qed"); - self.sync.with_context(|ctx| - self.on_demand.header_by_number(ctx, req) - .map(Some) - .map_err(err_premature_cancel) - .boxed() - ) + let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); + self.sync.with_context(|ctx| { + let fut = self.on_demand.hash_by_number(ctx, req) + .map(request::HeaderByHash) + .map_err(err_premature_cancel); + + fut.and_then(move |req| { + match sync.with_context(|ctx| on_demand.header_by_hash(ctx, req)) { + Some(fut) => fut.map_err(err_premature_cancel).boxed(), + None => future::err(errors::network_disabled()).boxed(), + } + }).map(Some).boxed() + }) } } } @@ -149,7 +156,7 @@ impl EthClient { sync.with_context(|ctx| on_demand.account(ctx, request::Account { header: header, address: address, - }).map(Some)) + })) .map(|x| x.map_err(err_premature_cancel).boxed()) .unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) }).boxed() From c13f01c4f96bfe604df570154f0edb383e9bc3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 17 Mar 2017 09:29:43 +0100 Subject: [PATCH 41/89] Using dedicated branch for jsonrpc --- Cargo.lock | 115 ++++++++++++++++----------------------- Cargo.toml | 6 +- dapps/Cargo.toml | 6 +- ipfs/Cargo.toml | 3 +- ipfs/src/error.rs | 8 +-- ipfs/src/lib.rs | 21 ++++--- rpc/Cargo.toml | 8 +-- rpc_client/Cargo.toml | 8 +-- rpc_client/src/client.rs | 20 ++++--- signer/Cargo.toml | 6 +- stratum/Cargo.toml | 6 +- 11 files changed, 95 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8343c4dc..a705d99db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5,7 +5,7 @@ dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 
(registry+https://github.com/rust-lang/crates.io-index)", - "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)", + "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -27,9 +27,9 @@ dependencies = [ "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", + "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -291,7 +291,7 @@ dependencies = [ [[package]] name = "ctrlc" version = "1.1.1" -source = "git+https://github.com/ethcore/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" +source = "git+https://github.com/paritytech/rust-ctrlc.git#b523017108bb2d571a7a69bd97bc406e63bc7a9d" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -456,9 +456,9 @@ dependencies = [ "ethcore-util 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -631,10 +631,10 @@ dependencies = [ "ethsync 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -677,14 +677,14 @@ dependencies = [ "ethcore-io 1.7.0", "ethcore-rpc 1.7.0", "ethcore-util 1.7.0", - 
"jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-ui 1.7.0", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", + "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)", ] [[package]] @@ -698,9 +698,9 @@ dependencies = [ "ethcore-ipc-nano 1.7.0", "ethcore-util 1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -980,25 +980,6 @@ dependencies = [ "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "hyper" -version = "0.10.0-a.0" -source = "git+https://github.com/ethcore/hyper#453c683b52208fefc32d29e4ac7c863439b2321f" -dependencies = [ - "cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rotor 0.6.3 (git+https://github.com/ethcore/rotor)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "hyper" version = "0.10.0-a.0" @@ -1119,7 +1100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1131,11 +1112,11 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = 
"git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1144,10 +1125,10 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1156,19 +1137,19 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1176,9 +1157,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1187,10 +1168,10 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#50f69e145f59305df6c22917e8dbb9b27a73a285" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" dependencies = [ - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1678,8 +1659,7 @@ dependencies = [ "cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", "ethcore-util 1.7.0", - "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -1716,14 +1696,14 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-util 1.7.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", + "ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)", ] [[package]] @@ -2629,7 +2609,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ws" version = "0.5.3" -source = "git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable#f5c0b35d660244d1b7500693c8cc28277ce1d418" +source = "git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable#f5c0b35d660244d1b7500693c8cc28277ce1d418" dependencies = [ "bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)", "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2715,7 +2695,7 @@ dependencies = [ "checksum core-foundation-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05eed248dc504a5391c63794fe4fb64f46f071280afaa1b73308f3c0ce4574c5" "checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" "checksum crypt32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e34988f7e069e0b2f3bfc064295161e489b2d4e04a2e4248fb94360cdf00b4ec" -"checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "" +"checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" "checksum docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4cc0acb4ce0828c6a5a11d47baa432fe885881c27428c3a4e473e454ffe57a76" @@ -2737,7 +2717,6 @@ dependencies = [ "checksum hidapi 0.3.1 (git+https://github.com/ethcore/hidapi-rs)" = "" "checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" -"checksum hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)" = "" "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" "checksum hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)" = "43a15e3273b2133aaac0150478ab443fb89f15c3de41d8d93d8f3bb14bf560f6" "checksum hyper 0.9.18 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9bf64f730d6ee4b0528a5f0a316363da9d8104318731509d4ccc86248f82b3" @@ -2749,13 +2728,13 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d95557e7ba6b71377b0f2c3b3ae96c53f1b75a926a6901a500f557a370af730a" "checksum itoa 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" -"checksum jsonrpc-core 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-macros 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" -"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" @@ -2910,7 +2889,7 @@ dependencies = [ "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "" +"checksum ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=mio-upstream-stable)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453" "checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef" diff --git a/Cargo.toml b/Cargo.toml index 62abb2159..954f3f240 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,9 +24,9 @@ serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" fdlimit = "0.1" -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } -ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } +ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 95e1f3f56..b2c4945b5 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -25,10 +25,10 @@ unicase = "1.3" url = "1.0" zip = { version = "0.1", default-features = false } -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } # TODO [ToDr] Temporary solution, server should be merged with RPC. 
-jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-devtools = { path = "../devtools" } ethcore-rpc = { path = "../rpc" } diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 1443c8cf2..c6241a7aa 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -8,9 +8,8 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } rlp = { path = "../util/rlp" } mime = "0.2" -hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } cid = "0.2.1" multihash = "0.5" diff --git a/ipfs/src/error.rs b/ipfs/src/error.rs index 1cbd54f1c..fadd75b9b 100644 --- a/ipfs/src/error.rs +++ b/ipfs/src/error.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use {multihash, cid, hyper}; +use {multihash, cid, http}; use route::Out; pub type Result = ::std::result::Result; @@ -25,7 +25,7 @@ pub enum ServerError { /// Wrapped `std::io::Error` IoError(::std::io::Error), /// Other `hyper` error - Other(hyper::error::Error), + Other(http::hyper::error::Error), /// Invalid --ipfs-api-interface InvalidInterface } @@ -80,8 +80,8 @@ impl From<::std::io::Error> for ServerError { } } -impl From for ServerError { - fn from(err: hyper::error::Error) -> ServerError { +impl From for ServerError { + fn from(err: http::hyper::error::Error) -> ServerError { ServerError::Other(err) } } diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 3d79c00fb..df03b6cd7 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -16,14 +16,13 @@ #[macro_use] extern crate mime; -extern crate hyper; extern crate multihash; extern crate cid; extern crate rlp; extern crate ethcore; extern crate ethcore_util as util; -extern crate jsonrpc_http_server; +extern crate jsonrpc_http_server as http; pub mod error; mod route; @@ -33,13 +32,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use hyper::server::{Listening, Handler, Request, Response}; -use hyper::net::HttpStream; -use hyper::header::{self, Vary, ContentLength, ContentType}; -use hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; +use http::hyper::server::{Listening, Handler, Request, Response}; +use http::hyper::net::HttpStream; +use http::hyper::header::{self, Vary, ContentLength, ContentType}; +use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; -pub use jsonrpc_http_server::{AccessControlAllowOrigin, Host, DomainsValidation}; +pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler pub struct IpfsHandler { @@ -82,14 +81,14 @@ impl Handler for IpfsHandler { } - if !jsonrpc_http_server::is_host_allowed(&req, &self.allowed_hosts) { + if !http::is_host_allowed(&req, &self.allowed_hosts) { self.out = Out::Bad("Disallowed Host header"); return Next::write(); } - let cors_header = jsonrpc_http_server::cors_header(&req, &self.cors_domains); - if cors_header == jsonrpc_http_server::CorsHeader::Invalid { + let cors_header = http::cors_header(&req, &self.cors_domains); + if cors_header == http::CorsHeader::Invalid { self.out = Out::Bad("Disallowed Origin 
header"); return Next::write(); @@ -209,7 +208,7 @@ pub fn start_server( let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); Ok( - hyper::Server::http(&addr)? + http::hyper::Server::http(&addr)? .handle(move |_| IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())) .map(|(listening, srv)| { diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index fe7afbcf6..fd8555734 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -19,10 +19,10 @@ serde_json = "0.9" time = "0.1" transient-hashmap = "0.1" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-ipc-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index a70816f9e..eb4a0ecff 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -1,7 +1,7 @@ [package] -authors = ["Ethcore "] +authors = ["Ethcore "] description = "Parity Rpc Client" -homepage = "http://ethcore.io" +homepage = "http://parity.io" license = "GPL-3.0" name = "parity-rpc-client" version = "1.4.0" @@ -14,8 +14,8 @@ serde = "0.9" serde_json = "0.9" tempdir = "0.3.5" url = "1.2.0" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "mio-upstream-stable" } ethcore-rpc = { path = "../rpc" } ethcore-signer = { path = "../signer" } ethcore-util = { path = "../util" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs index 5a4568d9e..3ef7ad7ce 100644 --- a/rpc_client/src/client.rs +++ b/rpc_client/src/client.rs @@ -83,18 +83,24 @@ impl Handler for RpcHandler { } fn on_error(&mut self, err: WsError) { match self.complete.take() { - Some(c) => c.complete(Err(RpcError::WsError(err))), - None => println!("unexpected error: {}", err), + Some(c) => match c.send(Err(RpcError::WsError(err))) { + Ok(_) => {}, + Err(_) => warn!(target: "rpc-client", "Unable to notify about error."), + }, + None => warn!(target: "rpc-client", "unexpected error: {}", err), } } fn on_open(&mut self, _: Handshake) -> WsResult<()> { match (self.complete.take(), self.out.take()) { (Some(c), Some(out)) => { - c.complete(Ok(Rpc { + let res = c.send(Ok(Rpc { out: out, counter: AtomicUsize::new(0), pending: self.pending.clone(), })); + if let Err(_) = res { + warn!(target: "rpc-client", "Unable to open a connection.") + } Ok(()) }, _ => { @@ -137,9 +143,9 @@ impl Handler for RpcHandler { } match self.pending.remove(response_id) { - Some(c) => c.complete(ret.map_err(|err| { - RpcError::JsonRpc(err) - })), + Some(c) => if let Err(_) = c.send(ret.map_err(|err| RpcError::JsonRpc(err))) { + warn!(target: 
"rpc-client", "Unable to send response.") + }, None => warn!( target: "rpc-client", "warning: unexpected id: {}", @@ -225,7 +231,7 @@ impl Rpc { // both fail and succeed. let c = once.take() .expect("connection closure called only once"); - c.complete(Err(RpcError::WsError(err))); + let _ = c.send(Err(RpcError::WsError(err))); }, // c will complete on the `on_open` event in the Handler _ => () diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 95c71b636..075aac9e8 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -12,12 +12,12 @@ rustc_version = "0.1" [dependencies] rand = "0.3.14" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-server-utils = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } log = "0.3" env_logger = "0.3" parity-dapps-glue = { version = "1.4", optional = true } -ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } +ws = { git = "https://github.com/paritytech/ws-rs.git", branch = "mio-upstream-stable" } ethcore-util = { path = "../util" } ethcore-io = { path = "../util/io" } ethcore-rpc = { path = "../rpc" } diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 201792340..fa418250b 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -11,9 +11,9 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" -jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } -jsonrpc-tcp-server = { git = "https://github.com/ethcore/jsonrpc.git", branch = "master" } +jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-util = { path = "../util" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" From 1d87f247158b9b722d6760aefad3e3caf97d47d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 20 Mar 2017 12:06:42 +0100 Subject: [PATCH 42/89] Bump --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ec811125..3e73e942f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1100,7 +1100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1112,7 +1112,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", 
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1125,7 +1125,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1137,7 +1137,7 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1147,7 +1147,7 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1157,7 +1157,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1168,7 +1168,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#1c6bdb2c254f64ef313bbba342f50360a4b02807" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#707cf73a7b72f2eecbf3665c53b4159ec867cbed" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", From d013a13be66c0e259c069771b083b12a6832d827 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 19:45:52 +0100 Subject: [PATCH 43/89] header_chain writes to database --- ethcore/light/src/client/header_chain.rs | 236 +++++++++++++++++++---- ethcore/light/src/client/mod.rs | 9 +- 2 files changed, 207 insertions(+), 38 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 9dcd25888..676142b17 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -23,9 +23,9 @@ //! This is separate from the `BlockChain` for two reasons: //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. -// TODO: use DB instead of memory. 
DB Layout: just the contents of `candidates`/`headers` use std::collections::{BTreeMap, HashMap}; +use std::sync::Arc; use cht; @@ -34,7 +34,10 @@ use ethcore::error::BlockError; use ethcore::encoded; use ethcore::header::Header; use ethcore::ids::BlockId; -use util::{H256, U256, HeapSizeOf, Mutex, RwLock}; + +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, View}; +use util::{H256, U256, HeapSizeOf, RwLock}; +use util::kvdb::{DBTransaction, KeyValueDB}; use smallvec::SmallVec; @@ -43,6 +46,9 @@ use smallvec::SmallVec; /// relevant to any blocks we've got in memory. const HISTORY: u64 = 2048; +/// The best block key. Maps to a `u64` best block number. +const BEST_KEY: &'static [u8] = &*b"best_block_key"; + /// Information about a block. #[derive(Debug, Clone)] pub struct BlockDescriptor { @@ -75,39 +81,130 @@ impl HeapSizeOf for Entry { } } +impl Encodable for Entry { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.candidates.len()); + + for candidate in &self.candidates { + s.begin_list(3) + .append(&candidate.hash) + .append(&candidate.parent_hash) + .append(&candidate.total_difficulty); + } + } +} + +impl Decodable for Entry { + fn decode(decoder: &D) -> Result { + let rlp = decoder.as_rlp(); + + let mut candidates = SmallVec::<[Candidate; 3]>::new(); + + for item in rlp.iter() { + candidates.push(Candidate { + hash: item.val_at(0)?, + parent_hash: item.val_at(1)?, + total_difficulty: item.val_at(2)?, + }) + } + + if candidates.is_empty() { return Err(DecoderError::Custom("Empty candidates vector submitted.")) } + + // rely on the invariant that the canonical entry is always first. + let canon_hash = candidates[0].hash; + Ok(Entry { + candidates: candidates, + canonical_hash: canon_hash, + }) + } +} + +fn cht_key(number: u64) -> String { + format!("canonical_{}", number) +} + +fn era_key(number: u64) -> String { + format!("candidates_{}", number) +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, headers: RwLock>, best_block: RwLock, - cht_roots: Mutex>, + db: Arc, + col: Option, } impl HeaderChain { - /// Create a new header chain given this genesis block. - pub fn new(genesis: &[u8]) -> Self { + /// Create a new header chain given this genesis block and database to read from. + pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { use ethcore::views::HeaderView; - let g_view = HeaderView::new(genesis); + let chain = if let Some(best_number) = db.get(col, BEST_KEY)?.map(|x| ::rlp::decode(&x)) { + let mut cur_number = best_number; + let mut candidates = BTreeMap::new(); + let mut headers = HashMap::new(); - HeaderChain { - genesis_header: encoded::Header::new(genesis.to_owned()), - best_block: RwLock::new(BlockDescriptor { - hash: g_view.hash(), - number: 0, - total_difficulty: g_view.difficulty(), - }), - candidates: RwLock::new(BTreeMap::new()), - headers: RwLock::new(HashMap::new()), - cht_roots: Mutex::new(Vec::new()), - } + // load all era entries and referenced headers within them. + while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { + let entry: Entry = ::rlp::decode(&entry); + for candidate in &entry.candidates { + match db.get(col, &*candidate.hash)? 
{ + Some(hdr) => headers.insert(candidate.hash, encoded::Header::new(hdr.to_vec())), + None => return Err(format!("Database missing referenced header: {}", candidate.hash)), + }; + } + candidates.insert(cur_number, entry); + + cur_number -= 1; + } + + // fill best block block descriptor. + if candidates.is_empty() { return Err(format!("Database corrupt: best block referenced but no data.")) } + let best_block = { + let era = candidates.get(&best_number) + .expect("candidates non-empty; filled in loop starting at best_number; qed"); + let best = &era.candidates[0]; + BlockDescriptor { + hash: best.hash, + number: best_number, + total_difficulty: best.total_difficulty, + } + }; + + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(best_block), + candidates: RwLock::new(candidates), + headers: RwLock::new(headers), + db: db, + col: col, + } + } else { + let g_view = HeaderView::new(genesis); + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(BlockDescriptor { + hash: g_view.hash(), + number: 0, + total_difficulty: g_view.difficulty(), + }), + candidates: RwLock::new(BTreeMap::new()), + headers: RwLock::new(HashMap::new()), + db: db, + col: col, + } + }; + + Ok(chain) } /// Insert a pre-verified header. /// /// This blindly trusts that the data given to it is sensible. - pub fn insert(&self, header: Header) -> Result<(), BlockError> { + pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<(), BlockError> { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); @@ -129,15 +226,19 @@ impl HeaderChain { let total_difficulty = parent_td + *header.difficulty(); // insert headers and candidates entries. - candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }) - .candidates.push(Candidate { + { + let cur_era = candidates.entry(number) + .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); + cur_era.candidates.push(Candidate { hash: hash, parent_hash: parent_hash, total_difficulty: total_difficulty, - }); + }); + } - let raw = ::rlp::encode(&header).to_vec(); - self.headers.write().insert(hash, encoded::Header::new(raw)); + let raw = ::rlp::encode(&header); + transaction.put(self.col, &hash[..], &*raw); + self.headers.write().insert(hash, encoded::Header::new(raw.to_vec())); // reorganize ancestors so canonical entries are first in their // respective candidates vectors. @@ -160,6 +261,10 @@ impl HeaderChain { // what about reorgs > cht::SIZE + HISTORY? // resetting to the last block of a given CHT should be possible. canon_hash = entry.candidates[0].parent_hash; + + // write altered era to disk. + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); @@ -168,13 +273,13 @@ impl HeaderChain { number: number, total_difficulty: total_difficulty, }; + transaction.put(self.col, BEST_KEY, &*::rlp::encode(&number)); // produce next CHT root if it's time. 
let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); if earliest_era + HISTORY + cht::SIZE <= number { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); - debug_assert_eq!(cht_num as usize, self.cht_roots.lock().len()); let mut headers = self.headers.write(); @@ -186,10 +291,13 @@ impl HeaderChain { let iter = || { let era_entry = candidates.remove(&i) .expect("all eras are sequential with no gaps; qed"); + transaction.delete(self.col, era_key(i).as_bytes()); + i += 1; for ancient in &era_entry.candidates { headers.remove(&ancient.hash); + transaction.delete(self.col, &ancient.hash); } let canon = &era_entry.candidates[0]; @@ -199,9 +307,9 @@ impl HeaderChain { .expect("fails only when too few items; this is checked; qed") }; + // write the CHT root to the database. debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); - - self.cht_roots.lock().push(cht_root); + transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root)); } } @@ -257,7 +365,13 @@ impl HeaderChain { /// This is because it's assumed that the genesis hash is known, /// so including it within a CHT would be redundant. pub fn cht_root(&self, n: usize) -> Option { - self.cht_roots.lock().get(n).map(|h| h.clone()) + match self.db.get(self.col, cht_key(n as u64).as_bytes()) { + Ok(val) => val.map(|x| ::rlp::decode(&x)), + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } } /// Get the genesis hash. @@ -297,8 +411,7 @@ impl HeaderChain { impl HeapSizeOf for HeaderChain { fn heap_size_of_children(&self) -> usize { self.candidates.read().heap_size_of_children() + - self.headers.read().heap_size_of_children() + - self.cht_roots.lock().heap_size_of_children() + self.headers.read().heap_size_of_children() } } @@ -324,16 +437,23 @@ impl<'a> Iterator for AncestryIter<'a> { #[cfg(test)] mod tests { use super::HeaderChain; + use std::sync::Arc; + use ethcore::ids::BlockId; use ethcore::header::Header; use ethcore::spec::Spec; + fn make_db() -> Arc<::util::KeyValueDB> { + Arc::new(::util::kvdb::in_memory(0)) + } + #[test] fn basic_chain() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -345,7 +465,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -361,7 +483,8 @@ mod tests { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let db = make_db(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -373,7 +496,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -389,7 +514,9 @@ mod tests 
{ header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 10; } @@ -410,7 +537,9 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * (i * i).into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); rolling_timestamp += 11; } @@ -432,11 +561,46 @@ mod tests { fn earliest_is_latest() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Latest).is_some()); assert!(chain.block_header(BlockId::Pending).is_some()); } + + #[test] + fn restore_from_db() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + } + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert!(chain.block_header(BlockId::Number(10)).is_none()); + assert!(chain.block_header(BlockId::Number(9000)).is_some()); + assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index c791caed1..23242f407 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -111,10 +111,14 @@ pub struct Client { impl Client { /// Create a new `Client`. pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + // TODO: use real DB. 
+ let db = ::util::kvdb::in_memory(0); + let gh = ::rlp::encode(&spec.genesis_header()); + Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())), + chain: HeaderChain::new(Arc::new(db), None, &gh).expect("new db every time"), report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), } @@ -201,7 +205,8 @@ impl Client { for verified_header in self.queue.drain(MAX) { let (num, hash) = (verified_header.number(), verified_header.hash()); - match self.chain.insert(verified_header) { + let mut tx = unimplemented!(); + match self.chain.insert(&mut tx, verified_header) { Ok(()) => { good.push(hash); self.report.write().blocks_imported += 1; From 21771aa1a6542783833307168128b024fb994b88 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 20:23:58 +0100 Subject: [PATCH 44/89] don't keep headers in memory to avoid DoS --- ethcore/light/src/client/header_chain.rs | 153 +++++++++++++++++------ 1 file changed, 118 insertions(+), 35 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 676142b17..25c836051 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -24,7 +24,7 @@ //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::sync::Arc; use cht; @@ -35,7 +35,7 @@ use ethcore::encoded; use ethcore::header::Header; use ethcore::ids::BlockId; -use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, View}; +use rlp::{Encodable, Decodable, Decoder, DecoderError, RlpStream, Rlp, View}; use util::{H256, U256, HeapSizeOf, RwLock}; use util::kvdb::{DBTransaction, KeyValueDB}; @@ -46,8 +46,8 @@ use smallvec::SmallVec; /// relevant to any blocks we've got in memory. const HISTORY: u64 = 2048; -/// The best block key. Maps to a `u64` best block number. -const BEST_KEY: &'static [u8] = &*b"best_block_key"; +/// The best block key. Maps to an RLP list: [best_era, last_era] +const CURRENT_KEY: &'static [u8] = &*b"best_and_latest"; /// Information about a block. #[derive(Debug, Clone)] @@ -131,7 +131,6 @@ fn era_key(number: u64) -> String { pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, - headers: RwLock>, best_block: RwLock, db: Arc, col: Option, @@ -142,30 +141,33 @@ impl HeaderChain { pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { use ethcore::views::HeaderView; - let chain = if let Some(best_number) = db.get(col, BEST_KEY)?.map(|x| ::rlp::decode(&x)) { - let mut cur_number = best_number; + let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { + let (best_number, highest_number) = { + let rlp = Rlp::new(&current); + (rlp.val_at(0), rlp.val_at(1)) + }; + + let mut cur_number = highest_number; let mut candidates = BTreeMap::new(); - let mut headers = HashMap::new(); // load all era entries and referenced headers within them. while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { let entry: Entry = ::rlp::decode(&entry); - for candidate in &entry.candidates { - match db.get(col, &*candidate.hash)? 
{ - Some(hdr) => headers.insert(candidate.hash, encoded::Header::new(hdr.to_vec())), - None => return Err(format!("Database missing referenced header: {}", candidate.hash)), - }; - } + trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", + cur_number, entry.candidates.len()); + candidates.insert(cur_number, entry); cur_number -= 1; } // fill best block block descriptor. - if candidates.is_empty() { return Err(format!("Database corrupt: best block referenced but no data.")) } let best_block = { - let era = candidates.get(&best_number) - .expect("candidates non-empty; filled in loop starting at best_number; qed"); + let era = match candidates.get(&best_number) { + Some(era) => era, + None => return Err(format!("Database corrupt: highest block referenced but no data.")), + }; + let best = &era.candidates[0]; BlockDescriptor { hash: best.hash, @@ -178,7 +180,6 @@ impl HeaderChain { genesis_header: encoded::Header::new(genesis.to_owned()), best_block: RwLock::new(best_block), candidates: RwLock::new(candidates), - headers: RwLock::new(headers), db: db, col: col, } @@ -192,7 +193,6 @@ impl HeaderChain { total_difficulty: g_view.difficulty(), }), candidates: RwLock::new(BTreeMap::new()), - headers: RwLock::new(HashMap::new()), db: db, col: col, } @@ -225,7 +225,7 @@ impl HeaderChain { let total_difficulty = parent_td + *header.difficulty(); - // insert headers and candidates entries. + // insert headers and candidates entries and write era to disk. { let cur_era = candidates.entry(number) .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); @@ -234,15 +234,32 @@ impl HeaderChain { parent_hash: parent_hash, total_difficulty: total_difficulty, }); + + // fix ordering of era before writing. + if total_difficulty > cur_era.candidates[0].total_difficulty { + let cur_pos = cur_era.candidates.len() - 1; + cur_era.candidates.swap(cur_pos, 0); + cur_era.canonical_hash = hash; + } + + transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era)) } let raw = ::rlp::encode(&header); transaction.put(self.col, &hash[..], &*raw); - self.headers.write().insert(hash, encoded::Header::new(raw.to_vec())); + + let (best_num, is_new_best) = { + let cur_best = self.best_block.read(); + if cur_best.total_difficulty < total_difficulty { + (number, true) + } else { + (cur_best.number, false) + } + }; // reorganize ancestors so canonical entries are first in their // respective candidates vectors. - if self.best_block.read().total_difficulty < total_difficulty { + if is_new_best { let mut canon_hash = hash; for (&height, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) { if height != number && entry.canonical_hash == canon_hash { break; } @@ -262,9 +279,11 @@ impl HeaderChain { // resetting to the last block of a given CHT should be possible. canon_hash = entry.candidates[0].parent_hash; - // write altered era to disk. - let rlp_era = ::rlp::encode(&*entry); - transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + // write altered era to disk + if height != number { + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + } } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); @@ -273,7 +292,6 @@ impl HeaderChain { number: number, total_difficulty: total_difficulty, }; - transaction.put(self.col, BEST_KEY, &*::rlp::encode(&number)); // produce next CHT root if it's time. 
let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); @@ -281,8 +299,6 @@ impl HeaderChain { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); - let mut headers = self.headers.write(); - let cht_root = { let mut i = earliest_era; @@ -296,7 +312,6 @@ impl HeaderChain { i += 1; for ancient in &era_entry.candidates { - headers.remove(&ancient.hash); transaction.delete(self.col, &ancient.hash); } @@ -313,20 +328,37 @@ impl HeaderChain { } } + // write the best and latest eras to the database. + { + let latest_num = *candidates.iter().rev().next().expect("at least one era just inserted; qed").0; + let mut stream = RlpStream::new_list(2); + stream.append(&best_num).append(&latest_num); + transaction.put(self.col, CURRENT_KEY, &stream.out()) + } Ok(()) } /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { + let load_from_db = |hash: H256| { + match self.db.get(self.col, &hash) { + Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } + } + }; + match id { BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), - BlockId::Hash(hash) => self.headers.read().get(&hash).cloned(), + BlockId::Hash(hash) => load_from_db(hash), BlockId::Number(num) => { if self.best_block.read().number < num { return None } self.candidates.read().get(&num).map(|entry| entry.canonical_hash) - .and_then(|hash| self.headers.read().get(&hash).cloned()) + .and_then(load_from_db) } BlockId::Latest | BlockId::Pending => { let hash = { @@ -338,7 +370,7 @@ impl HeaderChain { best.hash }; - self.headers.read().get(&hash).cloned() + load_from_db(hash) } } } @@ -401,7 +433,7 @@ impl HeaderChain { /// Get block status. pub fn status(&self, hash: &H256) -> BlockStatus { - match self.headers.read().contains_key(hash) { + match self.db.get(self.col, &*hash).ok().map_or(false, |x| x.is_some()) { true => BlockStatus::InChain, false => BlockStatus::Unknown, } @@ -410,8 +442,7 @@ impl HeaderChain { impl HeapSizeOf for HeaderChain { fn heap_size_of_children(&self) -> usize { - self.candidates.read().heap_size_of_children() + - self.headers.read().heap_size_of_children() + self.candidates.read().heap_size_of_children() } } @@ -603,4 +634,56 @@ mod tests { assert!(chain.cht_root(3).is_none()); assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); } + + #[test] + fn restore_higher_non_canonical() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + + // push 100 low-difficulty blocks. + for i in 1..101 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + + // push fewer high-difficulty blocks. 
+ for i in 1..11 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into() * 1000.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + + rolling_timestamp += 10; + } + + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + } + + // after restoration, non-canonical eras should still be loaded. + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + assert!(chain.candidates.read().get(&100).is_some()) + } } From bc9c1d482417bb47a83a56699b71df0a41f68518 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 20:57:13 +0100 Subject: [PATCH 45/89] use a database in ethcore-light --- Cargo.lock | 1 + ethcore/light/Cargo.toml | 1 + ethcore/light/src/client/mod.rs | 39 ++++++++++++++++----- ethcore/light/src/client/service.rs | 54 ++++++++++++++++++++++++++--- ethcore/light/src/lib.rs | 3 ++ ethcore/src/client/config.rs | 2 +- ethcore/src/db.rs | 4 ++- 7 files changed, 88 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6cf05cb0..05d57cbdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -550,6 +550,7 @@ name = "ethcore-light" version = "1.7.0" dependencies = [ "ethcore 1.7.0", + "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 6f95d8a0e..78210904e 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -17,6 +17,7 @@ ethcore-util = { path = "../../util" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } ethcore-ipc = { path = "../../ipc/rpc", optional = true } +ethcore-devtools = { path = "../../devtools" } rlp = { path = "../../util/rlp" } time = "0.1" smallvec = "0.3.1" diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 23242f407..a393130b1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::client::{ClientReport, EnvInfo}; +use ethcore::client::{ClientReport, EnvInfo, DatabaseCompactionProfile}; use ethcore::engines::Engine; use ethcore::ids::BlockId; use ethcore::header::Header; @@ -31,7 +31,7 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock}; +use util::{H256, Mutex, RwLock, KeyValueDB}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -45,6 +45,14 @@ mod service; pub struct Config { /// Verification queue config. pub queue: queue::Config, + /// Chain column in database. + pub chain_column: Option, + /// Database cache size. `None` => rocksdb default. + pub db_cache_size: Option, + /// State db compaction profile + pub db_compaction: DatabaseCompactionProfile, + /// Should db have WAL enabled? + pub db_wal: bool, } /// Trait for interacting with the header chain abstractly. @@ -106,22 +114,30 @@ pub struct Client { chain: HeaderChain, report: RwLock, import_lock: Mutex<()>, + db: Arc, } impl Client { /// Create a new `Client`. - pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { - // TODO: use real DB. 
- let db = ::util::kvdb::in_memory(0); + pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel) -> Result { let gh = ::rlp::encode(&spec.genesis_header()); - Client { + Ok(Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(Arc::new(db), None, &gh).expect("new db every time"), + chain: HeaderChain::new(db.clone(), chain_col, &gh)?, report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), - } + db: db, + }) + } + + /// Create a new `Client` backed purely in-memory. + /// This will ignore all database options in the configuration. + pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + let db = ::util::kvdb::in_memory(0); + + Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed") } /// Import a header to the queue for additional verification. @@ -205,7 +221,7 @@ impl Client { for verified_header in self.queue.drain(MAX) { let (num, hash) = (verified_header.number(), verified_header.hash()); - let mut tx = unimplemented!(); + let mut tx = self.db.transaction(); match self.chain.insert(&mut tx, verified_header) { Ok(()) => { good.push(hash); @@ -216,6 +232,11 @@ impl Client { bad.push(hash); } } + self.db.write_buffered(tx); + + if let Err(e) = self.db.flush() { + panic!("Database flush failed: {}. Check disk health and space.", e); + } } self.queue.mark_as_bad(&bad); diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index fe7caee94..f7a4d41a9 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -17,14 +17,36 @@ //! Minimal IO service for light client. //! Just handles block import messages and passes them to the client. +use std::fmt; +use std::path::Path; use std::sync::Arc; +use ethcore::db; use ethcore::service::ClientIoMessage; use ethcore::spec::Spec; use io::{IoContext, IoError, IoHandler, IoService}; +use util::kvdb::{Database, DatabaseConfig}; use super::{Client, Config as ClientConfig}; +/// Errors on service initialization. +#[derive(Debug)] +pub enum Error { + /// Database error. + Database(String), + /// I/O service error. + Io(IoError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Database(ref msg) => write!(f, "Database error: {}", msg), + Error::Io(ref err) => write!(f, "I/O service error: {}", err), + } + } +} + /// Light client service. pub struct Service { client: Arc, @@ -33,11 +55,31 @@ pub struct Service { impl Service { /// Start the service: initialize I/O workers and client itself. - pub fn start(config: ClientConfig, spec: &Spec) -> Result { - let io_service = try!(IoService::::start()); - let client = Arc::new(Client::new(config, spec, io_service.channel())); - try!(io_service.register_handler(Arc::new(ImportBlocks(client.clone())))); + pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result { + // initialize database. + let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); + // give all rocksdb cache to the header chain column. 
+ if let Some(size) = config.db_cache_size { + db_config.set_cache(db::COL_LIGHT_CHAIN, size); + } + + db_config.compaction = config.db_compaction.compaction_profile(path); + db_config.wal = config.db_wal; + + let db = Arc::new(Database::open( + &db_config, + &path.to_str().expect("DB path could not be converted to string.") + ).map_err(Error::Database)?); + + let io_service = IoService::::start().map_err(Error::Io)?; + let client = Arc::new(Client::new(config, + db, + db::COL_LIGHT_CHAIN, + spec, + io_service.channel(), + ).map_err(Error::Database)?); + io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { client: client, _io_service: io_service, @@ -63,11 +105,13 @@ impl IoHandler for ImportBlocks { #[cfg(test)] mod tests { use super::Service; + use devtools::RandomTempPath; use ethcore::spec::Spec; #[test] fn it_works() { let spec = Spec::new_test(); - Service::start(Default::default(), &spec).unwrap(); + let temp_path = RandomTempPath::new(); + Service::start(Default::default(), &spec, temp_path.as_path()).unwrap(); } } diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ada58d8de..828d77043 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -76,3 +76,6 @@ extern crate stats; #[cfg(feature = "ipc")] extern crate ethcore_ipc as ipc; + +#[cfg(test)] +extern crate ethcore_devtools as devtools; diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 5c7cf9471..b58ae83cb 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -26,7 +26,7 @@ use verification::{VerifierType, QueueConfig}; use util::{journaldb, CompactionProfile}; /// Client state db compaction profile -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum DatabaseCompactionProfile { /// Try to determine compaction profile automatically Auto, diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index 4e8da714d..bccb8e943 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -38,8 +38,10 @@ pub const COL_TRACE: Option = Some(4); pub const COL_ACCOUNT_BLOOM: Option = Some(5); /// Column for general information from the local node which can persist. pub const COL_NODE_INFO: Option = Some(6); +/// Column for the light client chain. +pub const COL_LIGHT_CHAIN: Option = Some(7); /// Number of columns in DB -pub const NUM_COLUMNS: Option = Some(7); +pub const NUM_COLUMNS: Option = Some(8); /// Modes for updating caches. 
#[derive(Clone, Copy)] From dd1f8295c4c1ab5fe155e9435dd5607728057056 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 21 Mar 2017 21:00:31 +0100 Subject: [PATCH 46/89] fix sync test compilation --- sync/src/light_sync/tests/test_net.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 898f8766d..2319e8d35 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -207,7 +207,7 @@ impl TestNet { pub fn light(n_light: usize, n_full: usize) -> Self { let mut peers = Vec::with_capacity(n_light + n_full); for _ in 0..n_light { - let client = LightClient::new(Default::default(), &Spec::new_test(), IoChannel::disconnected()); + let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected()); peers.push(Arc::new(Peer::new_light(Arc::new(client)))) } From dd1a3fc60ac1c67b89004a64ab1076c0e4f07f7f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 15:58:43 +0100 Subject: [PATCH 47/89] migration to light client mode --- ethcore/src/migrations/mod.rs | 19 +++++++++++++++++-- ethcore/src/migrations/v11.rs | 26 -------------------------- 2 files changed, 17 insertions(+), 28 deletions(-) delete mode 100644 ethcore/src/migrations/v11.rs diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 6cc4a13a8..76b10fd19 100644 --- a/ethcore/src/migrations/mod.rs +++ b/ethcore/src/migrations/mod.rs @@ -16,6 +16,8 @@ //! Database migrations. +use util::migration::ChangeColumns; + pub mod state; pub mod blocks; pub mod extras; @@ -27,5 +29,18 @@ pub use self::v9::Extract; mod v10; pub use self::v10::ToV10; -mod v11; -pub use self::v11::TO_V11; +/// The migration from v10 to v11. +/// Adds a column for node info. +pub const TO_V11: ChangeColumns = ChangeColumns { + pre_columns: Some(6), + post_columns: Some(7), + version: 11, +}; + +/// The migration from v11 to v12. +/// Adds a column for light chain storage. +pub const TO_V12: ChangeColumns = ChangeColumns { + pre_columns: Some(7), + post_columns: Some(8), + version: 12, +}; diff --git a/ethcore/src/migrations/v11.rs b/ethcore/src/migrations/v11.rs deleted file mode 100644 index e33de6170..000000000 --- a/ethcore/src/migrations/v11.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Adds a seventh column for node information. - -use util::migration::ChangeColumns; - -/// The migration from v10 to v11. 
-pub const TO_V11: ChangeColumns = ChangeColumns { - pre_columns: Some(6), - post_columns: Some(7), - version: 11, -}; From a9d75e222311ed1b505b282c50785e0f5fe5603c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 16:45:50 +0100 Subject: [PATCH 48/89] CLI options for light client --- parity/cli/config.full.toml | 2 +- parity/cli/mod.rs | 9 +++++++++ parity/cli/usage.txt | 5 +++++ parity/configuration.rs | 4 ++++ parity/run.rs | 3 +++ rpc/src/v1/tests/helpers/sync_provider.rs | 4 ++-- rpc/src/v1/types/mod.rs | 2 +- rpc/src/v1/types/sync.rs | 18 +++++++++--------- sync/src/api.rs | 18 +++++++++--------- 9 files changed, 43 insertions(+), 22 deletions(-) diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 6800ec2dc..4ddf8eaa7 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -38,7 +38,7 @@ warp = true allow_ips = "all" snapshot_peers = 0 max_pending_peers = 64 -serve_light = true +no_serve_light = false reserved_only = false reserved_peers = "./path_to_file" diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b346cb9d8..6fb15dbe6 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -93,6 +93,7 @@ usage! { flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), + flag_light: bool = false, or |c: &Config| otry!(c.parity).light, // -- Account Options flag_unlock: Option = None, @@ -148,6 +149,8 @@ usage! { flag_reserved_only: bool = false, or |c: &Config| otry!(c.network).reserved_only.clone(), flag_no_ancient_blocks: bool = false, or |_| None, + flag_no_serve_light: bool = false, + or |c: &Config| otry!(c.network).no_serve_light.clone(), // -- API and Console Options // RPC @@ -372,6 +375,7 @@ struct Operating { db_path: Option, keys_path: Option, identity: Option, + light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -407,6 +411,7 @@ struct Network { node_key: Option, reserved_peers: Option, reserved_only: Option, + no_serve_light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -630,6 +635,7 @@ mod tests { flag_db_path: Some("$HOME/.parity/chains".into()), flag_keys_path: "$HOME/.parity/keys".into(), flag_identity: "".into(), + flag_light: false, // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), @@ -660,6 +666,7 @@ mod tests { flag_reserved_peers: Some("./path_to_file".into()), flag_reserved_only: false, flag_no_ancient_blocks: false, + flag_no_serve_light: false, // -- API and Console Options // RPC @@ -832,6 +839,7 @@ mod tests { db_path: None, keys_path: None, identity: None, + light: None, }), account: Some(Account { unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), @@ -861,6 +869,7 @@ mod tests { node_key: None, reserved_peers: Some("./path/to/reserved_peers".into()), reserved_only: Some(true), + no_serve_light: None, }), rpc: Some(Rpc { disable: Some(true), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 322543607..55f64b018 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -67,6 +67,10 @@ Operating Options: --keys-path PATH Specify the path for JSON key files to be found (default: {flag_keys_path}). --identity NAME Specify your node's name. (default: {flag_identity}) + --light Experimental: run in light client mode. 
Light clients + synchronize a bare minimum of data and fetch necessary + data on-demand from the network. Much lower in storage, + potentially higher in bandwidth. (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. @@ -126,6 +130,7 @@ Networking Options: --max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers}) --no-ancient-blocks Disable downloading old blocks after snapshot restoration or warp sync. (default: {flag_no_ancient_blocks}) + --no-serve-light Disable serving of light peers. (default: {flag_no_serve_light}) API and Console Options: --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) diff --git a/parity/configuration.rs b/parity/configuration.rs index 9b491143d..1418f372c 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -375,6 +375,8 @@ impl Configuration { check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, verifier_settings: verifier_settings, + serve_light: !self.args.flag_no_serve_light, + light: self.args.flag_light, }; Cmd::Run(run_cmd) }; @@ -1194,6 +1196,8 @@ mod tests { check_seal: true, download_old_blocks: true, verifier_settings: Default::default(), + serve_light: true, + light: false, }; expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); diff --git a/parity/run.rs b/parity/run.rs index 7fe1ad273..a00a5c03c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -107,6 +107,8 @@ pub struct RunCmd { pub check_seal: bool, pub download_old_blocks: bool, pub verifier_settings: VerifierSettings, + pub serve_light: bool, + pub light: bool, } pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { @@ -248,6 +250,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R sync_config.fork_block = spec.fork_block(); sync_config.warp_sync = cmd.warp_sync; sync_config.download_old_blocks = cmd.download_old_blocks; + sync_config.serve_light = cmd.serve_light; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index fe2ae3f59..83c7db015 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -83,7 +83,7 @@ impl SyncProvider for TestSyncProvider { difficulty: Some(40.into()), head: 50.into(), }), - les_info: None, + pip_info: None, }, PeerInfo { id: None, @@ -96,7 +96,7 @@ impl SyncProvider for TestSyncProvider { difficulty: None, head: 60.into() }), - les_info: None, + pip_info: None, } ] } diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index a4bfcb41f..78eee5137 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -63,7 +63,7 @@ pub use self::receipt::Receipt; pub use self::rpc_settings::RpcSettings; pub use self::sync::{ SyncStatus, SyncInfo, Peers, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, - TransactionStats, ChainStatus, EthProtocolInfo, LesProtocolInfo, + TransactionStats, ChainStatus, EthProtocolInfo, PipProtocolInfo, }; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; diff --git a/rpc/src/v1/types/sync.rs b/rpc/src/v1/types/sync.rs index d83a3a64c..813fe8cb3 100644 --- a/rpc/src/v1/types/sync.rs +++ b/rpc/src/v1/types/sync.rs @@ -83,8 +83,8 @@ pub struct PeerNetworkInfo { pub struct PeerProtocolsInfo { 
/// Ethereum protocol information pub eth: Option, - /// LES protocol information. - pub les: Option, + /// PIP protocol information. + pub pip: Option, } /// Peer Ethereum protocol information @@ -108,10 +108,10 @@ impl From for EthProtocolInfo { } } -/// Peer LES protocol information +/// Peer PIP protocol information #[derive(Default, Debug, Serialize)] -pub struct LesProtocolInfo { - /// Negotiated LES protocol version +pub struct PipProtocolInfo { + /// Negotiated PIP protocol version pub version: u32, /// Peer total difficulty pub difficulty: U256, @@ -119,9 +119,9 @@ pub struct LesProtocolInfo { pub head: String, } -impl From for LesProtocolInfo { - fn from(info: ethsync::LesProtocolInfo) -> Self { - LesProtocolInfo { +impl From for PipProtocolInfo { + fn from(info: ethsync::PipProtocolInfo) -> Self { + PipProtocolInfo { version: info.version, difficulty: info.difficulty.into(), head: info.head.hex(), @@ -171,7 +171,7 @@ impl From for PeerInfo { }, protocols: PeerProtocolsInfo { eth: p.eth_info.map(Into::into), - les: p.les_info.map(Into::into), + pip: p.pip_info.map(Into::into), }, } } diff --git a/sync/src/api.rs b/sync/src/api.rs index 4cdc9d37a..6feba062d 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -126,7 +126,7 @@ pub struct PeerInfo { /// Eth protocol info. pub eth_info: Option, /// Light protocol info. - pub les_info: Option, + pub pip_info: Option, } /// Ethereum protocol info. @@ -141,10 +141,10 @@ pub struct EthProtocolInfo { pub difficulty: Option, } -/// LES protocol info. +/// PIP protocol info. #[derive(Debug)] #[cfg_attr(feature = "ipc", derive(Binary))] -pub struct LesProtocolInfo { +pub struct PipProtocolInfo { /// Protocol version pub version: u32, /// SHA3 of peer best block hash @@ -153,9 +153,9 @@ pub struct LesProtocolInfo { pub difficulty: U256, } -impl From for LesProtocolInfo { +impl From for PipProtocolInfo { fn from(status: light_net::Status) -> Self { - LesProtocolInfo { + PipProtocolInfo { version: status.protocol_version, head: status.head_hash, difficulty: status.head_td, @@ -184,7 +184,7 @@ pub struct EthSync { network: NetworkService, /// Main (eth/par) protocol handler eth_handler: Arc, - /// Light (les) protocol handler + /// Light (pip) protocol handler light_proto: Option>, /// The main subprotocol name subprotocol_name: [u8; 3], @@ -264,7 +264,7 @@ impl SyncProvider for EthSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: eth_sync.peer_info(&peer_id), - les_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), + pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) @@ -408,7 +408,7 @@ impl ChainNotify for EthSync { } } -/// LES event handler. +/// PIP event handler. /// Simply queues transactions from light client peers. 
struct TxRelay(Arc); @@ -786,7 +786,7 @@ impl LightSyncProvider for LightSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: None, - les_info: self.proto.peer_status(&peer_id).map(Into::into), + pip_info: self.proto.peer_status(&peer_id).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) From c718b5618ec14491299368a0e3806708a98708cc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 18:32:04 +0100 Subject: [PATCH 49/89] initial light CLI --- ethcore/light/src/client/mod.rs | 7 ++- ethcore/light/src/client/service.rs | 2 +- parity/cli/usage.txt | 3 +- parity/migration.rs | 3 +- parity/run.rs | 94 ++++++++++++++++++++++++++++- sync/src/api.rs | 2 +- 6 files changed, 101 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index a393130b1..92866da6d 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use ethcore::block_import_error::BlockImportError; use ethcore::block_status::BlockStatus; -use ethcore::client::{ClientReport, EnvInfo, DatabaseCompactionProfile}; +use ethcore::client::{ClientReport, EnvInfo}; use ethcore::engines::Engine; use ethcore::ids::BlockId; use ethcore::header::Header; @@ -31,7 +31,8 @@ use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; -use util::{H256, Mutex, RwLock, KeyValueDB}; +use util::{H256, Mutex, RwLock}; +use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -50,7 +51,7 @@ pub struct Config { /// Database cache size. `None` => rocksdb default. pub db_cache_size: Option, /// State db compaction profile - pub db_compaction: DatabaseCompactionProfile, + pub db_compaction: CompactionProfile, /// Should db have WAL enabled? pub db_wal: bool, } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index f7a4d41a9..89538fec2 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -64,7 +64,7 @@ impl Service { db_config.set_cache(db::COL_LIGHT_CHAIN, size); } - db_config.compaction = config.db_compaction.compaction_profile(path); + db_config.compaction = config.db_compaction; db_config.wal = config.db_wal; let db = Arc::new(Database::open( diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 55f64b018..11d5c6e54 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -70,7 +70,8 @@ Operating Options: --light Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, - potentially higher in bandwidth. (default: {flag_light}) + potentially higher in bandwidth. Has no effect with + subcommands (default: {flag_light}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. diff --git a/parity/migration.rs b/parity/migration.rs index 445724325..c4e5f5ac6 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -30,7 +30,7 @@ use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 11; +const CURRENT_VERSION: u32 = 12; /// First version of the consolidated database. const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. 
@@ -147,6 +147,7 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?; manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?; + manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?; Ok(manager) } diff --git a/parity/run.rs b/parity/run.rs index a00a5c03c..92aba9ed7 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -154,6 +154,89 @@ impl ::local_store::NodeInfo for FullNodeInfo { } } +// helper for light execution. +fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) -> Result<(bool, Option), String> { + use light::client as light_client; + use ethsync::{LightSyncParams, LightSync, ManageNetwork}; + use util::RwLock; + + let panic_handler = PanicHandler::new_in_arc(); + + // load spec + let spec = cmd.spec.spec()?; + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + + let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()); + + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?; + + // create dirs used by parity + cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?; + + info!("Starting {}", Colour::White.bold().paint(version())); + info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); + + // start client and create transaction queue. + let mut config = light_client::Config { + queue: Default::default(), + chain_column: ::ethcore::db::COL_LIGHT_CHAIN, + db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024), + db_compaction: compaction, + db_wal: cmd.wal, + }; + + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.verifier_settings = cmd.verifier_settings; + + let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) + .map_err(|e| format!("Error starting light client: {}", e))?; + let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); + let provider = ::light::provider::LightProvider::new(service.client().clone(), txq); + + // start network. + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // set network path. + net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + let sync_params = LightSyncParams { + network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?, + client: Arc::new(provider), + network_id: cmd.network_id.unwrap_or(spec.network_id()), + subprotocol_name: ::ethsync::LIGHT_PROTOCOL, + }; + let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + light_sync.start_network(); + + // start RPCs. 
+ let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; + + // prepare account provider + let _account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); + // rest TODO + + // wait for ctrl-c. + Ok(wait_for_exit(panic_handler, None, None, can_restart)) +} + pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running @@ -163,12 +246,17 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R } } - // set up panic handler - let panic_handler = PanicHandler::new_in_arc(); - // increase max number of open files raise_fd_limit(); + // run as light client. + if cmd.light { + return execute_light(cmd, can_restart, logger); + } + + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); + // load spec let spec = cmd.spec.spec()?; diff --git a/sync/src/api.rs b/sync/src/api.rs index 6feba062d..010bbeb23 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -43,7 +43,7 @@ pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par"; /// Ethereum sync protocol pub const ETH_PROTOCOL: ProtocolId = *b"eth"; /// Ethereum light protocol -pub const LIGHT_PROTOCOL: ProtocolId = *b"plp"; +pub const LIGHT_PROTOCOL: ProtocolId = *b"pip"; /// Sync configuration #[derive(Debug, Clone, Copy)] From e3d6525d8313eae2c471cd457f6c4bfebef2ed2b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 19:26:51 +0100 Subject: [PATCH 50/89] store cumulative cost in pending request set. --- ethcore/light/src/net/mod.rs | 21 +++++++------ ethcore/light/src/net/request_set.rs | 45 +++++++++++++++++++++++----- ethcore/light/src/net/tests/mod.rs | 4 +-- sync/src/light_sync/mod.rs | 6 ++-- 4 files changed, 56 insertions(+), 20 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index de86f1ce5..5f78f9c22 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -303,12 +303,9 @@ impl LightProtocol { match peer.remote_flow { None => Err(Error::NotServer), Some((ref mut creds, ref params)) => { - // check that enough credits are available. - let mut temp_creds: Credits = creds.clone(); - for request in requests.requests() { - temp_creds.deduct_cost(params.compute_cost(request))?; - } - *creds = temp_creds; + // compute and deduct cost. + let cost = params.compute_cost_multi(requests.requests()); + creds.deduct_cost(cost)?; let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { @@ -318,7 +315,7 @@ impl LightProtocol { }); // begin timeout. - peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now()); Ok(req_id) } } @@ -408,13 +405,18 @@ impl LightProtocol { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); + let cumulative_cost = peer_info.pending_requests.cumulative_cost(); let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; - let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); - c.update_to(actual_credits); + + // only update if the cumulative cost of the request set is zero. 
+ if cumulative_cost == 0.into() { + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); + } Ok(()) } @@ -520,6 +522,7 @@ impl LightProtocol { last_update: SteadyTime::now(), }); + trace!(target: "pip", "Sending status to peer {}", peer); io.send(*peer, packet::STATUS, status_packet); } diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index a2391ef6f..094fa1894 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -27,22 +27,29 @@ use std::iter::FromIterator; use request::Request; use request::Requests; use net::{timeout, ReqId}; +use util::U256; use time::{Duration, SteadyTime}; +// Request set entry: requests + cost. +#[derive(Debug)] +struct Entry(Requests, U256); + /// Request set. #[derive(Debug)] pub struct RequestSet { counter: u64, + cumulative_cost: U256, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { fn default() -> Self { RequestSet { counter: 0, + cumulative_cost: 0.into(), base: None, ids: HashMap::new(), reqs: BTreeMap::new(), @@ -52,10 +59,12 @@ impl Default for RequestSet { impl RequestSet { /// Push requests onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { + pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) { let counter = self.counter; + self.cumulative_cost = self.cumulative_cost + cost; + self.ids.insert(req_id, counter); - self.reqs.insert(counter, req); + self.reqs.insert(counter, Entry(req, cost)); if self.reqs.keys().next().map_or(true, |x| *x == counter) { self.base = Some(now); @@ -71,7 +80,7 @@ impl RequestSet { None => return None, }; - let req = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); + let Entry(req, cost) = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); match self.reqs.keys().next() { Some(k) if *k > id => self.base = Some(now), @@ -79,6 +88,7 @@ impl RequestSet { _ => {} } + self.cumulative_cost = self.cumulative_cost - cost; Some(req) } @@ -93,7 +103,7 @@ impl RequestSet { let first_req = self.reqs.values().next() .expect("base existing implies `reqs` non-empty; qed"); - base + compute_timeout(&first_req) <= now + base + compute_timeout(&first_req.0) <= now } /// Collect all pending request ids. @@ -108,6 +118,9 @@ impl RequestSet { /// Whether the set is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// The cumulative cost of all requests in the set. + pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost } } // helper to calculate timeout for a specific set of requests. 
@@ -141,8 +154,8 @@ mod tests { let the_req = RequestBuilder::default().build(); let req_time = compute_timeout(&the_req); - req_set.insert(ReqId(0), the_req.clone(), test_begin); - req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); + req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); + req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); @@ -153,4 +166,22 @@ mod tests { assert!(!req_set.check_timeout(test_end)); assert!(req_set.check_timeout(test_end + Duration::seconds(1))); } + + #[test] + fn cumulative_cost() { + let the_req = RequestBuilder::default().build(); + let test_begin = SteadyTime::now(); + let test_end = test_begin + Duration::seconds(1); + let mut req_set = RequestSet::default(); + + for i in 0..5 { + req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin); + assert_eq!(req_set.cumulative_cost, (i + 1).into()); + } + + for i in (0..5).rev() { + assert!(req_set.remove(&ReqId(i), test_end).is_some()); + assert_eq!(req_set.cumulative_cost, i.into()); + } + } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index e2081534c..67dfe8131 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -600,8 +600,8 @@ fn id_guard() { let mut pending_requests = RequestSet::default(); - pending_requests.insert(req_id_1, req.clone(), ::time::SteadyTime::now()); - pending_requests.insert(req_id_2, req, ::time::SteadyTime::now()); + pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now()); + pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now()); proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { local_credits: flow_params.create_credits(), diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 4590103e7..1b092ab03 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -418,8 +418,10 @@ impl LightSync { let best_td = chain_info.pending_total_difficulty; let sync_target = match *self.best_seen.lock() { Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), - _ => { - trace!(target: "sync", "No target to sync to."); + ref other => { + let network_score = other.as_ref().map(|target| target.head_td); + trace!(target: "sync", "No target to sync to. 
Network score: {:?}, Local score: {:?}", + network_score, best_td); *state = SyncState::Idle; return; } From 35d9a9815eb9798af58150ec7f986d890a1f8fcb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 20:14:40 +0100 Subject: [PATCH 51/89] mild abstraction of RPC dependencies --- parity/dapps.rs | 4 +- parity/rpc.rs | 22 ++-- parity/rpc_apis.rs | 264 ++++++++++++++++++++++++--------------------- parity/run.rs | 5 +- parity/signer.rs | 24 +++-- 5 files changed, 177 insertions(+), 142 deletions(-) diff --git a/parity/dapps.rs b/parity/dapps.rs index bbd5f4960..29268e904 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -59,7 +59,7 @@ impl Default for Configuration { } pub struct Dependencies { - pub apis: Arc, + pub apis: Arc, pub client: Arc, pub sync: Arc, pub remote: parity_reactor::TokioRemote, @@ -182,7 +182,7 @@ mod server { } else { rpc_apis::ApiSet::UnsafeContext }; - let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); + let apis = rpc_apis::setup_rpc(deps.stats, &*deps.apis, api_set); let start_result = match auth { None => { server.start_unsecured_http(url, apis, deps.remote) diff --git a/parity/rpc.rs b/parity/rpc.rs index a435f24db..e4307f09b 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -82,8 +82,8 @@ impl fmt::Display for IpcConfiguration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct Dependencies { + pub apis: Arc, pub remote: TokioRemote, pub stats: Arc, } @@ -109,7 +109,9 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> + where D: rpc_apis::Dependencies +{ if !conf.enabled { return Ok(None); } @@ -119,12 +121,14 @@ pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result MetaIoHandler { - rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> + where D: rpc_apis::Dependencies +{ + rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis) } -pub fn setup_http_rpc_server( - dependencies: &Dependencies, +pub fn setup_http_rpc_server( + dependencies: &Dependencies, url: &SocketAddr, cors_domains: Option>, allowed_hosts: Option>, @@ -145,12 +149,12 @@ pub fn setup_http_rpc_server( } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { +pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { if !conf.enabled { return Ok(None); } Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) } -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { +pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { let handler = setup_apis(apis, dependencies); let remote = dependencies.remote.clone(); match rpc::start_ipc(addr, handler, remote, RpcExtractor) { diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 469245c19..a27fbfa26 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -27,7 +27,7 @@ use ethcore::client::Client; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use ethcore_rpc::{Metadata, NetworkSettings}; -use ethcore_rpc::informant::{Middleware, RpcStats, ClientNotifier}; +use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; use ethcore_rpc::dispatch::FullDispatcher; use ethsync::{ManageNetwork, SyncProvider}; use 
hash_fetch::fetch::Client as FetchClient; @@ -112,25 +112,6 @@ impl FromStr for ApiSet { } } -pub struct Dependencies { - pub signer_service: Arc, - pub client: Arc, - pub snapshot: Arc, - pub sync: Arc, - pub net: Arc, - pub secret_store: Arc, - pub miner: Arc, - pub external_miner: Arc, - pub logger: Arc, - pub settings: Arc, - pub net_service: Arc, - pub updater: Arc, - pub geth_compatibility: bool, - pub dapps_interface: Option, - pub dapps_port: Option, - pub fetch: FetchClient, -} - fn to_modules(apis: &[Api]) -> BTreeMap { let mut modules = BTreeMap::new(); for api in apis { @@ -151,6 +132,145 @@ fn to_modules(apis: &[Api]) -> BTreeMap { modules } +/// RPC dependencies can be used to initialize RPC endpoints from APIs. +pub trait Dependencies { + type Notifier: ActivityNotifier; + + /// Create the activity notifier. + fn activity_notifier(&self) -> Self::Notifier; + + /// Extend the given I/O handler with endpoints for each API. + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]); +} + +/// RPC dependencies for a full node. +pub struct FullDependencies { + pub signer_service: Arc, + pub client: Arc, + pub snapshot: Arc, + pub sync: Arc, + pub net: Arc, + pub secret_store: Arc, + pub miner: Arc, + pub external_miner: Arc, + pub logger: Arc, + pub settings: Arc, + pub net_service: Arc, + pub updater: Arc, + pub geth_compatibility: bool, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, +} + +impl Dependencies for FullDependencies { + type Notifier = ClientNotifier; + + fn activity_notifier(&self) -> ClientNotifier { + ClientNotifier { + client: self.client.clone(), + } + } + + fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &[Api]) { + use ethcore_rpc::v1::*; + + macro_rules! add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) + } + } + } + } + + let dispatcher = FullDispatcher::new(Arc::downgrade(&self.client), Arc::downgrade(&self.miner)); + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(NetClient::new(&self.sync).to_delegate()); + }, + Api::Eth => { + let client = EthClient::new( + &self.client, + &self.snapshot, + &self.sync, + &self.secret_store, + &self.miner, + &self.external_miner, + EthClientOptions { + pending_nonce_from_queue: self.geth_compatibility, + allow_pending_receipt_query: !self.geth_compatibility, + send_block_number_in_get_work: !self.geth_compatibility, + } + ); + handler.extend_with(client.to_delegate()); + + let filter_client = EthFilterClient::new(&self.client, &self.miner); + handler.extend_with(filter_client.to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + }, + Api::Personal => { + handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => 
Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(ParityClient::new( + &self.client, + &self.miner, + &self.sync, + &self.updater, + &self.net_service, + &self.secret_store, + self.logger.clone(), + self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(ParitySetClient::new( + &self.client, + &self.miner, + &self.updater, + &self.net_service, + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(TracesClient::new(&self.client, &self.miner).to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] @@ -172,110 +292,12 @@ impl ApiSet { } } -macro_rules! add_signing_methods { - ($namespace:ident, $handler:expr, $deps:expr) => { - { - let handler = &mut $handler; - let deps = &$deps; - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); - if deps.signer_service.is_enabled() { - handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) - } else { - handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) - } - } - } -} - -pub fn setup_rpc(stats: Arc, deps: Arc, apis: ApiSet) -> MetaIoHandler { - use ethcore_rpc::v1::*; - - let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, ClientNotifier { - client: deps.client.clone(), - })); - +pub fn setup_rpc(stats: Arc, deps: &D, apis: ApiSet) -> MetaIoHandler> { + let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, deps.activity_notifier())); // it's turned into vector, cause ont of the cases requires &[] let apis = apis.list_apis().into_iter().collect::>(); - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + deps.extend_with_set(&mut handler, &apis[..]); - for api in &apis { - match *api { - Api::Web3 => { - handler.extend_with(Web3Client::new().to_delegate()); - }, - Api::Net => { - handler.extend_with(NetClient::new(&deps.sync).to_delegate()); - }, - Api::Eth => { - let client = EthClient::new( - &deps.client, - &deps.snapshot, - &deps.sync, - &deps.secret_store, - &deps.miner, - &deps.external_miner, - EthClientOptions { - pending_nonce_from_queue: deps.geth_compatibility, - allow_pending_receipt_query: !deps.geth_compatibility, - send_block_number_in_get_work: !deps.geth_compatibility, - } - ); - handler.extend_with(client.to_delegate()); - - let filter_client = EthFilterClient::new(&deps.client, &deps.miner); - handler.extend_with(filter_client.to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - }, - Api::Personal => { - handler.extend_with(PersonalClient::new(&deps.secret_store, dispatcher.clone(), deps.geth_compatibility).to_delegate()); - }, - Api::Signer => { - handler.extend_with(SignerClient::new(&deps.secret_store, dispatcher.clone(), &deps.signer_service).to_delegate()); - }, - Api::Parity => { - let signer = match deps.signer_service.is_enabled() { - true => 
Some(deps.signer_service.clone()), - false => None, - }; - handler.extend_with(ParityClient::new( - &deps.client, - &deps.miner, - &deps.sync, - &deps.updater, - &deps.net_service, - &deps.secret_store, - deps.logger.clone(), - deps.settings.clone(), - signer, - deps.dapps_interface.clone(), - deps.dapps_port, - ).to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - add_signing_methods!(ParitySigning, handler, deps); - }, - Api::ParityAccounts => { - handler.extend_with(ParityAccountsClient::new(&deps.secret_store).to_delegate()); - }, - Api::ParitySet => { - handler.extend_with(ParitySetClient::new( - &deps.client, - &deps.miner, - &deps.updater, - &deps.net_service, - deps.fetch.clone(), - ).to_delegate()) - }, - Api::Traces => { - handler.extend_with(TracesClient::new(&deps.client, &deps.miner).to_delegate()) - }, - Api::Rpc => { - let modules = to_modules(&apis); - handler.extend_with(RpcClient::new(modules).to_delegate()); - } - } - } handler } diff --git a/parity/run.rs b/parity/run.rs index 92aba9ed7..be4e25f97 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -498,7 +498,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // set up dependencies for rpc servers let rpc_stats = Arc::new(informant::RpcStats::default()); let signer_path = cmd.signer_conf.signer_path.clone(); - let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { + let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { signer_service: Arc::new(rpc_apis::SignerService::new(move || { signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) }, cmd.ui_address)), @@ -553,7 +553,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R remote: event_loop.raw_remote(), rpc_stats: rpc_stats.clone(), }; - let signer_server = signer::start(cmd.signer_conf.clone(), signer_deps)?; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; // secret store key server let secretstore_deps = secretstore::Dependencies { }; diff --git a/parity/signer.rs b/parity/signer.rs index 0d71604d4..b4c70494d 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -23,7 +23,7 @@ pub use ethcore_signer::Server as SignerServer; use ansi_term::Colour; use dir::default_data_path; use ethcore_rpc::informant::RpcStats; -use ethcore_rpc; +use ethcore_rpc::{self, ConfirmationsQueue}; use ethcore_signer as signer; use helpers::replace_home; use parity_reactor::TokioRemote; @@ -55,8 +55,8 @@ impl Default for Configuration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct Dependencies { + pub apis: Arc, pub remote: TokioRemote, pub rpc_stats: Arc, } @@ -77,11 +77,15 @@ impl signer::MetaExtractor for StandardExtractor { } } -pub fn start(conf: Configuration, deps: Dependencies) -> Result, String> { +pub fn start( + conf: Configuration, + queue: Arc, + deps: Dependencies, +) -> Result, String> { if !conf.enabled { Ok(None) } else { - Ok(Some(do_start(conf, deps)?)) + Ok(Some(do_start(conf, queue, deps)?)) } } @@ -125,14 +129,18 @@ pub fn generate_new_token(path: String) -> io::Result { Ok(code) } -fn do_start(conf: Configuration, deps: Dependencies) -> Result { +fn do_start( + conf: Configuration, + queue: Arc, + deps: Dependencies +) -> Result { let addr = format!("{}:{}", conf.interface, conf.port) .parse() .map_err(|_| format!("Invalid port specified: {}", conf.port))?; let start_result = { let server = signer::ServerBuilder::new( - deps.apis.signer_service.queue(), + 
queue, codes_path(conf.signer_path), ); if conf.skip_origin_validation { @@ -141,7 +149,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result Date: Wed, 22 Mar 2017 21:09:43 +0100 Subject: [PATCH 52/89] light client RPC dependencies --- ethcore/light/src/lib.rs | 1 + parity/rpc_apis.rs | 115 ++++++++++++++++++++++++++++++++- rpc/src/v1/helpers/dispatch.rs | 1 - rpc/src/v1/impls/light/mod.rs | 3 + rpc/src/v1/impls/light/net.rs | 49 ++++++++++++++ rpc/src/v1/impls/net.rs | 2 +- sync/src/api.rs | 9 +++ 7 files changed, 175 insertions(+), 5 deletions(-) create mode 100644 rpc/src/v1/impls/light/net.rs diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 828d77043..82b6ea126 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -55,6 +55,7 @@ pub mod remote { mod types; +pub use self::cache::Cache; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; pub use types::request as request; diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index a27fbfa26..3f8f38a3a 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -28,12 +28,13 @@ use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use ethcore_rpc::{Metadata, NetworkSettings}; use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; -use ethcore_rpc::dispatch::FullDispatcher; -use ethsync::{ManageNetwork, SyncProvider}; +use ethcore_rpc::dispatch::{FullDispatcher, LightDispatcher}; +use ethsync::{ManageNetwork, SyncProvider, LightSync}; use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{MetaIoHandler}; +use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; use updater::Updater; -use util::RotatingLogger; +use util::{Mutex, RwLock, RotatingLogger}; #[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { @@ -271,6 +272,114 @@ impl Dependencies for FullDependencies { } } +/// Light client notifier. Doesn't do anything yet, but might in the future. +pub struct LightClientNotifier; + +impl ActivityNotifier for LightClientNotifier { + fn active(&self) {} +} + +/// RPC dependencies for a light client. 
+pub struct LightDependencies { + pub signer_service: Arc, + pub client: Arc<::light::client::Client>, + pub sync: Arc, + pub net: Arc, + pub secret_store: Arc, + pub logger: Arc, + pub settings: Arc, + pub on_demand: Arc<::light::on_demand::OnDemand>, + pub cache: Arc>, + pub transaction_queue: Arc>, + pub updater: Arc, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, + pub geth_compatibility: bool, +} + +impl Dependencies for LightDependencies { + type Notifier = LightClientNotifier; + + fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + use ethcore_rpc::v1::*; + + let dispatcher = LightDispatcher::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + self.transaction_queue.clone(), + ); + + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate()); + }, + Api::Eth => { + let client = light::EthClient::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.transaction_queue.clone(), + self.secret_store.clone(), + self.cache.clone(), + ); + handler.extend_with(client.to_delegate()); + + // TODO: filters and signing methods. + }, + Api::Personal => { + handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(light::ParityClient::new( + Arc::new(dispatcher.clone()), + self.secret_store.clone(), + self.logger.clone(), + self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + // TODO + //add_signing_methods!(EthSigning, handler, self); + //add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(light::ParitySetClient::new( + self.sync.clone(), + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(light::TracesClient.to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 8a99a7239..e1b298b9f 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -207,7 +207,6 @@ pub fn fetch_gas_price_corpus( } /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. -/// Light client `ETH` RPC. #[derive(Clone)] pub struct LightDispatcher { /// Sync service. 
diff --git a/rpc/src/v1/impls/light/mod.rs b/rpc/src/v1/impls/light/mod.rs index 8c2e6d240..38ba2438e 100644 --- a/rpc/src/v1/impls/light/mod.rs +++ b/rpc/src/v1/impls/light/mod.rs @@ -23,7 +23,10 @@ pub mod eth; pub mod parity; pub mod parity_set; pub mod trace; +pub mod net; pub use self::eth::EthClient; pub use self::parity::ParityClient; pub use self::parity_set::ParitySetClient; +pub use self::net::NetClient; +pub use self::trace::TracesClient; diff --git a/rpc/src/v1/impls/light/net.rs b/rpc/src/v1/impls/light/net.rs new file mode 100644 index 000000000..4f0ede48f --- /dev/null +++ b/rpc/src/v1/impls/light/net.rs @@ -0,0 +1,49 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Net rpc implementation. +use std::sync::Arc; +use jsonrpc_core::Error; +use ethsync::LightSyncProvider; +use v1::traits::Net; + +/// Net rpc implementation. +pub struct NetClient { + sync: Arc +} + +impl NetClient where S: LightSyncProvider { + /// Creates new NetClient. + pub fn new(sync: Arc) -> Self { + NetClient { + sync: sync, + } + } +} + +impl Net for NetClient where S: LightSyncProvider { + fn version(&self) -> Result { + Ok(format!("{}", self.sync.network_id()).to_owned()) + } + + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned()) + } + + fn is_listening(&self) -> Result { + Ok(true) + } +} diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 5588805ab..399b2201a 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -21,7 +21,7 @@ use ethsync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient where S: SyncProvider { +pub struct NetClient { sync: Weak } diff --git a/sync/src/api.rs b/sync/src/api.rs index 010bbeb23..e6c093893 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -642,6 +642,9 @@ pub trait LightSyncProvider { /// Get peers information fn peers(&self) -> Vec; + /// Get network id. + fn network_id(&self) -> u64; + /// Get the enode if available. 
fn enode(&self) -> Option; @@ -666,6 +669,7 @@ pub struct LightSync { proto: Arc, network: NetworkService, subprotocol_name: [u8; 3], + network_id: u64, } impl LightSync { @@ -701,6 +705,7 @@ impl LightSync { proto: light_proto, network: service, subprotocol_name: params.subprotocol_name, + network_id: params.network_id, }) } @@ -796,6 +801,10 @@ impl LightSyncProvider for LightSync { self.network.external_url() } + fn network_id(&self) -> u64 { + self.network_id + } + fn transactions_stats(&self) -> BTreeMap { Default::default() // TODO } From 83911a7290d7d9090946f616e66082e5fd7c3ead Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 22:00:52 +0100 Subject: [PATCH 53/89] complete quick'n'dirty light CLI --- parity/dapps.rs | 100 +++++++++++++++++++++++---------------------- parity/rpc_apis.rs | 1 - parity/run.rs | 95 ++++++++++++++++++++++++++++++++++++------ sync/src/api.rs | 6 +++ 4 files changed, 140 insertions(+), 62 deletions(-) diff --git a/parity/dapps.rs b/parity/dapps.rs index 29268e904..970f51b96 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -18,13 +18,15 @@ use std::path::PathBuf; use std::sync::Arc; use dir::default_data_path; -use ethcore::client::Client; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::transaction::{Transaction, Action}; use ethcore_rpc::informant::RpcStats; -use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; +use hash_fetch::urlhint::ContractClient; use helpers::replace_home; use rpc_apis::{self, SignerService}; use parity_reactor; +use util::{Bytes, Address, U256}; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { @@ -58,17 +60,56 @@ impl Default for Configuration { } } -pub struct Dependencies { - pub apis: Arc, +/// Registrar implementation of the full client. +pub struct FullRegistrar { + /// Handle to the full client. pub client: Arc, - pub sync: Arc, +} + +impl ContractClient for FullRegistrar { + fn registrar(&self) -> Result { + self.client.additional_params().get("registrar") + .ok_or_else(|| "Registrar not defined.".into()) + .and_then(|registrar| { + registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) + }) + } + + fn call(&self, address: Address, data: Bytes) -> Result { + let from = Address::default(); + let transaction = Transaction { + nonce: self.client.latest_nonce(&from), + action: Action::Call(address), + gas: U256::from(50_000_000), + gas_price: U256::default(), + value: U256::default(), + data: data, + }.fake_sign(from); + + self.client.call(&transaction, BlockId::Latest, Default::default()) + .map_err(|e| format!("{:?}", e)) + .map(|executed| { + executed.output + }) + } +} + +// TODO: light client implementation forwarding to OnDemand and waiting for future +// to resolve. 
+ +pub struct Dependencies { + pub apis: Arc, + pub sync_status: Arc<::ethcore_dapps::SyncStatus>, + pub contract_client: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, pub stats: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> + where D: rpc_apis::Dependencies +{ if !configuration.enabled { return Ok(None); } @@ -130,21 +171,16 @@ mod server { use std::sync::Arc; use std::net::SocketAddr; use std::io; - use util::{Bytes, Address, U256}; use ansi_term::Colour; - use ethcore::transaction::{Transaction, Action}; - use ethcore::client::{Client, BlockChainClient, BlockId}; use ethcore_dapps::{AccessControlAllowOrigin, Host}; - use ethcore_rpc::is_major_importing; - use hash_fetch::urlhint::ContractClient; use parity_reactor; use rpc_apis; pub use ethcore_dapps::Server as WebappServer; - pub fn setup_dapps_server( - deps: Dependencies, + pub fn setup_dapps_server( + deps: Dependencies, dapps_path: PathBuf, extra_dapps: Vec, url: &SocketAddr, @@ -157,18 +193,16 @@ mod server { let server = dapps::ServerBuilder::new( &dapps_path, - Arc::new(Registrar { client: deps.client.clone() }), + deps.contract_client, parity_reactor::Remote::new(deps.remote.clone()), ); let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - let sync = deps.sync.clone(); - let client = deps.client.clone(); let signer = deps.signer.clone(); let server = server .fetch(deps.fetch.clone()) - .sync_status(Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()))) + .sync_status(deps.sync_status) .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) .extra_dapps(&extra_dapps) .signer_address(deps.signer.address()) @@ -201,36 +235,4 @@ mod server { Ok(server) => Ok(server), } } - - struct Registrar { - client: Arc, - } - - impl ContractClient for Registrar { - fn registrar(&self) -> Result { - self.client.additional_params().get("registrar") - .ok_or_else(|| "Registrar not defined.".into()) - .and_then(|registrar| { - registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) - }) - } - - fn call(&self, address: Address, data: Bytes) -> Result { - let from = Address::default(); - let transaction = Transaction { - nonce: self.client.latest_nonce(&from), - action: Action::Call(address), - gas: U256::from(50_000_000), - gas_price: U256::default(), - value: U256::default(), - data: data, - }.fake_sign(from); - - self.client.call(&transaction, BlockId::Latest, Default::default()) - .map_err(|e| format!("{:?}", e)) - .map(|executed| { - executed.output - }) - } - } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 3f8f38a3a..6f5a0c5f8 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -291,7 +291,6 @@ pub struct LightDependencies { pub on_demand: Arc<::light::on_demand::OnDemand>, pub cache: Arc>, pub transaction_queue: Arc>, - pub updater: Arc, pub dapps_interface: Option, pub dapps_port: Option, pub fetch: FetchClient, diff --git a/parity/run.rs b/parity/run.rs index be4e25f97..74cae07fd 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -30,6 +30,7 @@ use ethcore::account_provider::{AccountProvider, AccountProviderSettings}; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; use ethcore::snapshot; use 
ethcore::verification::queue::VerifierSettings; +use light::Cache as LightDataCache; use ethsync::SyncConfig; use informant::Informant; use updater::{UpdatePolicy, Updater}; @@ -61,6 +62,10 @@ const SNAPSHOT_PERIOD: u64 = 10000; // how many blocks to wait before starting a periodic snapshot. const SNAPSHOT_HISTORY: u64 = 100; +// Number of minutes before a given gas price corpus should expire. +// Light client only. +const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6; + // Pops along with error messages when a password is missing or invalid. const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file."; @@ -155,7 +160,7 @@ impl ::local_store::NodeInfo for FullNodeInfo { } // helper for light execution. -fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) -> Result<(bool, Option), String> { +fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { use light::client as light_client; use ethsync::{LightSyncParams, LightSync, ManageNetwork}; use util::RwLock; @@ -206,7 +211,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) .map_err(|e| format!("Error starting light client: {}", e))?; let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); - let provider = ::light::provider::LightProvider::new(service.client().clone(), txq); + let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone()); // start network. // set up bootnodes @@ -215,6 +220,13 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - net_conf.boot_nodes = spec.nodes.clone(); } + // TODO: configurable cache size. + let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); + let cache = Arc::new(::util::Mutex::new(cache)); + + // start on_demand service. + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + // set network path. net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); let sync_params = LightSyncParams { @@ -222,16 +234,70 @@ fn execute_light(cmd: RunCmd, can_restart: bool, _logger: Arc) - client: Arc::new(provider), network_id: cmd.network_id.unwrap_or(spec.network_id()), subprotocol_name: ::ethsync::LIGHT_PROTOCOL, + handlers: vec![on_demand.clone()], }; let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + let light_sync = Arc::new(light_sync); light_sync.start_network(); // start RPCs. 
+ // spin up event loop + let event_loop = EventLoop::spawn(); + + // fetch service + let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; // prepare account provider - let _account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); - // rest TODO + let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); + let rpc_stats = Arc::new(informant::RpcStats::default()); + let signer_path = cmd.signer_conf.signer_path.clone(); + + let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { + signer_service: Arc::new(rpc_apis::SignerService::new(move || { + signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) + }, cmd.ui_address)), + client: service.client().clone(), + sync: light_sync.clone(), + net: light_sync.clone(), + secret_store: account_provider, + logger: logger, + settings: Arc::new(cmd.net_settings), + on_demand: on_demand, + cache: cache, + transaction_queue: txq, + dapps_interface: match cmd.dapps_conf.enabled { + true => Some(cmd.dapps_conf.interface.clone()), + false => None, + }, + dapps_port: match cmd.dapps_conf.enabled { + true => Some(cmd.dapps_conf.port), + false => None, + }, + fetch: fetch, + geth_compatibility: cmd.geth_compatibility, + }); + + let dependencies = rpc::Dependencies { + apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + stats: rpc_stats.clone(), + }; + + // start rpc servers + let _http_server = rpc::new_http(cmd.http_conf, &dependencies)?; + let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; + + // the signer server + let signer_deps = signer::Dependencies { + apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + rpc_stats: rpc_stats.clone(), + }; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let _signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; + + // TODO: Dapps // wait for ctrl-c. Ok(wait_for_exit(panic_handler, None, None, can_restart)) @@ -536,14 +602,19 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; // the dapps server - let dapps_deps = dapps::Dependencies { - apis: deps_for_rpc_apis.clone(), - client: client.clone(), - sync: sync_provider.clone(), - remote: event_loop.raw_remote(), - fetch: fetch.clone(), - signer: deps_for_rpc_apis.signer_service.clone(), - stats: rpc_stats.clone(), + let dapps_deps = { + let (sync, client) = (sync_provider.clone(), client.clone()); + let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); + + dapps::Dependencies { + apis: deps_for_rpc_apis.clone(), + sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())), + contract_client: contract_client, + remote: event_loop.raw_remote(), + fetch: fetch.clone(), + signer: deps_for_rpc_apis.signer_service.clone(), + stats: rpc_stats.clone(), + } }; let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; diff --git a/sync/src/api.rs b/sync/src/api.rs index e6c093893..927d8fce6 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -662,6 +662,8 @@ pub struct LightSyncParams { pub network_id: u64, /// Subprotocol name. pub subprotocol_name: [u8; 3], + /// Other handlers to attach. + pub handlers: Vec>, } /// Service for light synchronization. 
@@ -696,6 +698,10 @@ impl LightSync { let sync_handler = try!(SyncHandler::new(params.client.clone())); light_proto.add_handler(Arc::new(sync_handler)); + for handler in params.handlers { + light_proto.add_handler(handler); + } + Arc::new(light_proto) }; From 23a6b1998512960383cf377c08df4203eeb4ab1d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 22 Mar 2017 22:10:02 +0100 Subject: [PATCH 54/89] fix import --- parity/rpc_apis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index ad61dc8ca..76e54ff81 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -34,7 +34,7 @@ use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{MetaIoHandler}; use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; use updater::Updater; -use util::{Mutex, RwLock, RotatingLogger}; +use util::{Mutex, RwLock}; use ethcore_logger::RotatingLogger; #[derive(Debug, PartialEq, Clone, Eq, Hash)] From a55001ad1de1a291dfa3800a76bc242828450c45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 02:55:25 +0100 Subject: [PATCH 55/89] fix deadlock in on_demand --- ethcore/light/src/on_demand/mod.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 8d451c88e..6e37a74ae 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -210,7 +210,7 @@ impl OnDemand { /// it as easily. pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); - match self.cache.lock().block_header(&req.0) { + match { self.cache.lock().block_header(&req.0) } { Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } @@ -232,7 +232,7 @@ impl OnDemand { sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_body(&req.hash) { + match { self.cache.lock().block_body(&req.hash) } { Some(body) => { let mut stream = RlpStream::new_list(3); stream.append_raw(&req.header.into_inner(), 1); @@ -255,7 +255,7 @@ impl OnDemand { if req.0.receipts_root() == SHA3_NULL_RLP { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_receipts(&req.0.hash()) { + match { self.cache.lock().block_receipts(&req.0.hash()) } { Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } @@ -397,10 +397,12 @@ impl Handler for OnDemand { } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - let mut peers = self.peers.write(); - if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { - peer.status.update_from(&announcement); - peer.capabilities.update_from(&announcement); + { + let mut peers = self.peers.write(); + if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { + peer.status.update_from(&announcement); + peer.capabilities.update_from(&announcement); + } } self.dispatch_orphaned(ctx.as_basic()); From b96eb458770fd6e3a9b34115255ef2e36c4f8bbc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 03:23:53 +0100 Subject: [PATCH 56/89] eth_syncing RPC for light client --- rpc/src/v1/impls/light/eth.rs | 17 +++++++++++++++- sync/src/api.rs | 16 +++++++++++---- sync/src/light_sync/mod.rs | 37 +++++++++++++++++++++++++++++++++++ 3 files changed, 65 
insertions(+), 5 deletions(-) diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 251daf90d..ba32bf35c 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -245,7 +245,22 @@ impl Eth for EthClient { } fn syncing(&self) -> Result { - rpc_unimplemented!() + if self.sync.is_major_importing() { + let chain_info = self.client.chain_info(); + let current_block = U256::from(chain_info.best_block_number); + let highest_block = self.sync.highest_block().map(U256::from) + .unwrap_or_else(|| current_block.clone()); + + Ok(SyncStatus::Info(SyncInfo { + starting_block: U256::from(self.sync.start_block()).into(), + current_block: current_block.into(), + highest_block: highest_block.into(), + warp_chunks_amount: None, + warp_chunks_processed: None, + })) + } else { + Ok(SyncStatus::None) + } } fn author(&self, _meta: Self::Metadata) -> BoxFuture { diff --git a/sync/src/api.rs b/sync/src/api.rs index 927d8fce6..bfa33e7b8 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -669,6 +669,7 @@ pub struct LightSyncParams { /// Service for light synchronization. pub struct LightSync { proto: Arc, + sync: Arc<::light_sync::SyncInfo + Sync + Send>, network: NetworkService, subprotocol_name: [u8; 3], network_id: u64, @@ -682,7 +683,7 @@ impl LightSync { use light_sync::LightSync as SyncHandler; // initialize light protocol handler and attach sync module. - let light_proto = { + let (sync, light_proto) = { let light_params = LightParams { network_id: params.network_id, flow_params: Default::default(), // or `None`? @@ -695,20 +696,21 @@ impl LightSync { }; let mut light_proto = LightProtocol::new(params.client.clone(), light_params); - let sync_handler = try!(SyncHandler::new(params.client.clone())); - light_proto.add_handler(Arc::new(sync_handler)); + let sync_handler = Arc::new(try!(SyncHandler::new(params.client.clone()))); + light_proto.add_handler(sync_handler.clone()); for handler in params.handlers { light_proto.add_handler(handler); } - Arc::new(light_proto) + (sync_handler, Arc::new(light_proto)) }; let service = try!(NetworkService::new(params.network_config)); Ok(LightSync { proto: light_proto, + sync: sync, network: service, subprotocol_name: params.subprotocol_name, network_id: params.network_id, @@ -726,6 +728,12 @@ impl LightSync { } } +impl ::std::ops::Deref for LightSync { + type Target = ::light_sync::SyncInfo; + + fn deref(&self) -> &Self::Target { &*self.sync } +} + impl ManageNetwork for LightSync { fn accept_unreserved_peers(&self) { self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 1b092ab03..18fe3c953 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -206,6 +206,7 @@ impl<'a> ResponseContext for ResponseCtx<'a> { /// Light client synchronization manager. See module docs for more details. pub struct LightSync { + start_block_number: u64, best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. client: Arc, @@ -525,6 +526,7 @@ impl LightSync { /// so it can act on events. pub fn new(client: Arc) -> Result { Ok(LightSync { + start_block_number: client.as_light_client().chain_info().best_block_number, best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), client: client, @@ -533,3 +535,38 @@ impl LightSync { }) } } + +/// Trait for erasing the type of a light sync object and exposing read-only methods. 
+pub trait SyncInfo { + /// Get the highest block advertised on the network. + fn highest_block(&self) -> Option; + + /// Get the block number at the time of sync start. + fn start_block(&self) -> u64; + + /// Whether major sync is underway. + fn is_major_importing(&self) -> bool; +} + +impl SyncInfo for LightSync { + fn highest_block(&self) -> Option { + self.best_seen.lock().as_ref().map(|x| x.head_num) + } + + fn start_block(&self) -> u64 { + self.start_block_number + } + + fn is_major_importing(&self) -> bool { + const EMPTY_QUEUE: usize = 3; + + if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE { + return true; + } + + match *self.state.lock() { + SyncState::Idle => false, + _ => true, + } + } +} From 0d110ed47c72c4c6b51f7219cc88efa0dfa48ec8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:00:22 +0100 Subject: [PATCH 57/89] apply pending changes to chain after DB commit --- ethcore/light/src/client/header_chain.rs | 29 ++++++++++++++++++++---- ethcore/light/src/client/mod.rs | 10 ++++---- 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 022bae04e..8256595c3 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -126,6 +126,11 @@ fn era_key(number: u64) -> String { format!("candidates_{}", number) } +/// Pending changes from `insert` to be applied after the database write has finished. +pub struct PendingChanges { + best_block: Option, // new best block. +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. @@ -203,10 +208,15 @@ impl HeaderChain { /// Insert a pre-verified header. /// /// This blindly trusts that the data given to it is sensible. - pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<(), BlockError> { + /// Returns a set of pending changes to be applied with `apply_pending` + /// before the next call to insert and after the transaction has been written. + pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); + let mut pending = PendingChanges { + best_block: None, + }; // hold candidates the whole time to guard import order. let mut candidates = self.candidates.write(); @@ -286,11 +296,11 @@ impl HeaderChain { } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); - *self.best_block.write() = BlockDescriptor { + pending.best_block = Some(BlockDescriptor { hash: hash, number: number, total_difficulty: total_difficulty, - }; + }); // produce next CHT root if it's time. let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); @@ -334,7 +344,15 @@ impl HeaderChain { stream.append(&best_num).append(&latest_num); transaction.put(self.col, CURRENT_KEY, &stream.out()) } - Ok(()) + Ok(pending) + } + + /// Apply pending changes from a previous `insert` operation. + /// Must be done before the next `insert` call. + pub fn apply_pending(&self, pending: PendingChanges) { + if let Some(best_block) = pending.best_block { + *self.best_block.write() = best_block; + } } /// Get a block header. 
In the case of query by number, only canonical blocks @@ -360,6 +378,9 @@ impl HeaderChain { .and_then(load_from_db) } BlockId::Latest | BlockId::Pending => { + // hold candidates hear to prevent deletion of the header + // as we read it. + let _candidates = self.candidates.read(); let hash = { let best = self.best_block.read(); if best.number == 0 { diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index 92866da6d..d294053e1 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -223,18 +223,20 @@ impl Client { let (num, hash) = (verified_header.number(), verified_header.hash()); let mut tx = self.db.transaction(); - match self.chain.insert(&mut tx, verified_header) { - Ok(()) => { + let pending = match self.chain.insert(&mut tx, verified_header) { + Ok(pending) => { good.push(hash); self.report.write().blocks_imported += 1; + pending } Err(e) => { debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); bad.push(hash); + break; } - } + }; self.db.write_buffered(tx); - + self.chain.apply_pending(pending); if let Err(e) = self.db.flush() { panic!("Database flush failed: {}. Check disk health and space.", e); } From 4eb69dc0fe09ff2252dd8754bfbb2a865021640d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:36:49 +0100 Subject: [PATCH 58/89] reintroduce credits recharging --- ethcore/light/src/net/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 624bd1041..395d42d0d 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -303,10 +303,18 @@ impl LightProtocol { match peer.remote_flow { None => Err(Error::NotServer), Some((ref mut creds, ref params)) => { + // apply recharge to credits if there's no pending requests. + if peer.pending_requests.is_empty() { + params.recharge(creds); + } + // compute and deduct cost. let cost = params.compute_cost_multi(requests.requests()); creds.deduct_cost(cost)?; + trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}", + peer_id, cost, creds.current()); + let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { let mut stream = RlpStream::new_list(2); @@ -686,6 +694,8 @@ impl LightProtocol { trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); // deserialize requests, check costs and request validity. 
+ self.flow_params.recharge(&mut peer.local_credits); + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; @@ -712,6 +722,7 @@ impl LightProtocol { }); trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current()); io.respond(packet::RESPONSE, { let mut stream = RlpStream::new_list(3); From 77f036ee21b573407fd3ca3b4544941f9cc66232 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 04:38:08 +0100 Subject: [PATCH 59/89] fix capabilities-interpreting error in on_demand --- ethcore/light/src/on_demand/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 6e37a74ae..20875c1a1 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -57,15 +57,15 @@ impl Peer { self.capabilities.serve_headers && self.status.head_num > req.num(), Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, Pending::Block(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::BlockReceipts(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()), Pending::Account(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::Code(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1), Pending::TxProof(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), } } } From ec52a4a2357292452d7a393788a11b4ba9895bed Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 13:24:04 +0100 Subject: [PATCH 60/89] more tracing in on-demand --- ethcore/light/src/on_demand/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 20875c1a1..f658a1f2c 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -319,7 +319,6 @@ impl OnDemand { } } - trace!(target: "on_demand", "No suitable peer for request"); self.orphaned_requests.write().push(pending); } @@ -353,6 +352,7 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); + trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len()); for mut orphaned in to_dispatch { let hung_up = match orphaned { Pending::HeaderProof(_, ref mut sender) => match *sender { From a1df49ef3ee623c02c50f5be60e40e559bb56170 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:04:26 +0100 Subject: [PATCH 61/89] add test for request vec deserialization --- ethcore/light/src/types/request/mod.rs | 27 ++++++++++++++++++++++++++ 1 file 
changed, 27 insertions(+) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 83d7963ac..c7bc8776d 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1707,4 +1707,31 @@ mod tests { assert_eq!(rlp.val_at::(0).unwrap(), 100usize); assert_eq!(rlp.list_at::(1).unwrap(), reqs); } + + #[test] + fn responses_vec() { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + + let body = ::ethcore::encoded::Body::new(stream.out()); + let reqs = vec![ + Response::Headers(HeadersResponse { headers: vec![] }), + Response::HeaderProof(HeaderProofResponse { proof: vec![], hash: Default::default(), td: 100.into()}), + Response::Receipts(ReceiptsResponse { receipts: vec![Default::default()] }), + Response::Body(BodyResponse { body: body }), + Response::Account(AccountResponse { + proof: vec![], + nonce: 100.into(), + balance: 123.into(), + code_hash: Default::default(), + storage_root: Default::default() + }), + Response::Storage(StorageResponse { proof: vec![], value: H256::default() }), + Response::Code(CodeResponse { code: vec![1, 2, 3, 4, 5] }), + Response::Execution(ExecutionResponse { items: vec![] }), + ]; + + let raw = ::rlp::encode_list(&reqs); + assert_eq!(::rlp::decode_list::(&raw), reqs); + } } From ac7f1f6719621e9390f1f97bc64a6aaa43cf97d0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:15:13 +0100 Subject: [PATCH 62/89] fix header chain tests --- ethcore/light/src/client/header_chain.rs | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 8256595c3..1c218204b 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -517,8 +517,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -548,8 +549,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -566,8 +568,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -589,8 +592,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 11; } @@ -640,8 +644,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -676,8 +681,9 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -692,8 +698,9 @@ mod tests { parent_hash = header.hash(); let 
mut tx = db.transaction(); - chain.insert(&mut tx, header).unwrap(); + let pending = chain.insert(&mut tx, header).unwrap(); db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } From 54eb575000c870948239179bbdc23c7e6cd9c902 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:38:23 +0100 Subject: [PATCH 63/89] request tests that demonstrate broken RLP behavior --- ethcore/light/src/types/request/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index c7bc8776d..f640687d5 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1601,7 +1601,7 @@ mod tests { let full_req = Request::Account(req.clone()); let res = AccountResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], nonce: 100.into(), balance: 123456.into(), code_hash: Default::default(), @@ -1625,7 +1625,7 @@ mod tests { let full_req = Request::Storage(req.clone()); let res = StorageResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], value: H256::default(), }; let full_res = Response::Storage(res.clone()); From 1485dd07aed9b83ee5eafcc61bea1968da557c7c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:38:32 +0100 Subject: [PATCH 64/89] use prev credits in tracing --- ethcore/light/src/net/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 395d42d0d..56b078b3b 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -309,11 +309,12 @@ impl LightProtocol { } // compute and deduct cost. + let pre_creds = creds.current(); let cost = params.compute_cost_multi(requests.requests()); creds.deduct_cost(cost)?; trace!(target: "pip", "requesting from peer {}. 
Cost: {}; Available: {}", - peer_id, cost, creds.current()); + peer_id, cost, pre_creds); let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { From c75b49667eb15982db6b2da083f562a6055f5896 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 14:49:02 +0100 Subject: [PATCH 65/89] workaround for #5008 --- ethcore/light/src/types/request/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index f640687d5..0e11b8a7f 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1051,8 +1051,9 @@ pub mod account { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { + let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: rlp.list_at(0)?, + proof: proof?, nonce: rlp.val_at(1)?, balance: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, @@ -1198,8 +1199,9 @@ pub mod storage { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { + let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: rlp.list_at(0)?, + proof: proof?, value: rlp.val_at(1)?, }) } From 10a470a5fa7eb2f8102a7a741e6ab4f97ed4c5cd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 15:44:16 +0100 Subject: [PATCH 66/89] better bookkeeping of requests in light sync --- ethcore/light/src/net/context.rs | 2 ++ ethcore/light/src/on_demand/mod.rs | 16 +++++++++------- ethcore/light/src/types/request/mod.rs | 3 ++- sync/src/light_sync/mod.rs | 25 +++++++++++++++++++++++-- 4 files changed, 36 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 9eafead57..64ddd19a3 100644 --- a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -61,10 +61,12 @@ impl<'a> IoContext for NetworkContext<'a> { } fn disconnect_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disconnect of peer {}", peer); NetworkContext::disconnect_peer(self, peer); } fn disable_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disable of peer {}", peer); NetworkContext::disable_peer(self, peer); } diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index f658a1f2c..bc1ba4fb7 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -424,6 +424,8 @@ impl Handler for OnDemand { } }; + trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind()); + // handle the response appropriately for the request. // all branches which do not return early lead to disabling of the peer // due to misbehavior. 
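The request-bookkeeping changes introduced by this patch (the pending_reqs set and the abandoned-request handling appear just below) reduce to one rule: remember every request id you issued, and silently drop any response carrying an id you do not recognise. A std-only sketch of that rule, with plain u64 ids standing in for the protocol's ReqId:

    use std::collections::HashSet;

    /// Hypothetical tracker for in-flight request ids.
    #[derive(Default)]
    struct PendingRequests {
        ids: HashSet<u64>,
        next_id: u64,
    }

    impl PendingRequests {
        /// Record a newly dispatched request and hand back its id.
        fn issue(&mut self) -> u64 {
            let id = self.next_id;
            self.next_id += 1;
            self.ids.insert(id);
            id
        }

        /// True if the response belongs to us; unknown ids are simply ignored.
        fn accept_response(&mut self, id: u64) -> bool {
            self.ids.remove(&id)
        }

        /// Forget requests that were abandoned, e.g. because the peer went away.
        fn abandon(&mut self, ids: &[u64]) {
            for id in ids {
                self.ids.remove(id);
            }
        }
    }

    fn main() {
        let mut pending = PendingRequests::default();

        let id = pending.issue();
        assert!(pending.accept_response(id));   // first response is handled
        assert!(!pending.accept_response(id));  // duplicates are dropped
        assert!(!pending.accept_response(42));  // responses we never asked for are dropped

        // Abandoning (e.g. on disconnect) forgets the id as well.
        let id = pending.issue();
        pending.abandon(&[id]);
        assert!(!pending.accept_response(id));
    }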
@@ -443,7 +445,7 @@ impl Handler for OnDemand { } return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -456,7 +458,7 @@ impl Handler for OnDemand { let _ = sender.send(header); return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -469,7 +471,7 @@ impl Handler for OnDemand { let _ = sender.send(block); return } - Err(e) => warn!("Error handling response for block request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e), } } } @@ -482,7 +484,7 @@ impl Handler for OnDemand { let _ = sender.send(receipts); return } - Err(e) => warn!("Error handling response for receipts request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e), } } } @@ -495,7 +497,7 @@ impl Handler for OnDemand { let _ = sender.send(maybe_account); return } - Err(e) => warn!("Error handling response for state request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), } } } @@ -506,7 +508,7 @@ impl Handler for OnDemand { let _ = sender.send(response.code.clone()); return } - Err(e) => warn!("Error handling response for code request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e), } } } @@ -521,7 +523,7 @@ impl Handler for OnDemand { let _ = sender.send(Err(err)); return } - ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), + ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"), } } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 0e11b8a7f..062f5e445 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -435,7 +435,8 @@ impl Response { } } - fn kind(&self) -> Kind { + /// Inspect the kind of this response. + pub fn kind(&self) -> Kind { match *self { Response::Headers(_) => Kind::Headers, Response::HeaderProof(_) => Kind::HeaderProof, diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 18fe3c953..2bc179a21 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -32,7 +32,7 @@ //! announced blocks. //! - On bad block/response, punish peer and reset. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::mem; use std::sync::Arc; @@ -150,6 +150,19 @@ impl AncestorSearch { } } + fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch { + match self { + AncestorSearch::Awaiting(id, start, req) => { + if req_ids.iter().find(|&x| x == &id).is_some() { + AncestorSearch::Queued(start) + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } + fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch where F: FnMut(HeadersRequest) -> Option { @@ -209,6 +222,7 @@ pub struct LightSync { start_block_number: u64, best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. + pending_reqs: Mutex>, // requests from this handler. 
client: Arc, rng: Mutex, state: Mutex, @@ -271,7 +285,8 @@ impl Handler for LightSync { *state = match mem::replace(&mut *state, SyncState::Idle) { SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), + SyncState::AncestorSearch(search) => + SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)), SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), }; } @@ -321,6 +336,10 @@ impl Handler for LightSync { return } + if !self.pending_reqs.lock().remove(&req_id) { + return + } + let headers = match responses.get(0) { Some(&request::Response::Headers(ref response)) => &response.headers[..], Some(_) => { @@ -496,6 +515,7 @@ impl LightSync { for peer in &peer_ids { match ctx.request_from(*peer, request.clone()) { Ok(id) => { + self.pending_reqs.lock().insert(id.clone()); return Some(id) } Err(NetError::NoCredits) => {} @@ -529,6 +549,7 @@ impl LightSync { start_block_number: client.as_light_client().chain_info().best_block_number, best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), + pending_reqs: Mutex::new(HashSet::new()), client: client, rng: Mutex::new(try!(OsRng::new())), state: Mutex::new(SyncState::Idle), From 974f89d5bfd0d4d63312ddc308b76fe42bde04fc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 16:00:00 +0100 Subject: [PATCH 67/89] correct workaround for RLP issue --- ethcore/light/src/types/request/mod.rs | 35 ++++++++++---------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 062f5e445..4c55cdfaf 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -738,12 +738,10 @@ pub mod header_proof { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.hash).append(&self.td); + s.begin_list(3) + .append_list::,_>(&self.proof[..]) + .append(&self.hash) + .append(&self.td); } } } @@ -1052,9 +1050,8 @@ pub mod account { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: proof?, + proof: rlp.list_at(0)?, nonce: rlp.val_at(1)?, balance: rlp.val_at(2)?, code_hash: rlp.val_at(3)?, @@ -1065,12 +1062,9 @@ pub mod account { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(5).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.nonce) + s.begin_list(5) + .append_list::,_>(&self.proof[..]) + .append(&self.nonce) .append(&self.balance) .append(&self.code_hash) .append(&self.storage_root); @@ -1200,9 +1194,8 @@ pub mod storage { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - let proof: Result<_, _> = rlp.at(0)?.iter().map(|x| x.as_list()).collect(); Ok(Response { - proof: proof?, + proof: rlp.list_at(0)?, value: rlp.val_at(1)?, }) } @@ -1210,11 +1203,9 @@ pub mod storage { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - s.append(&self.value); + s.begin_list(2) + .append_list::,_>(&self.proof[..]) + .append(&self.value); } } } @@ -1543,7 +1534,7 @@ mod tests { let full_req = Request::HeaderProof(req.clone()); let res = 
HeaderProofResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], hash: Default::default(), td: 100.into(), }; From 5700f4ac8186dd959d64f68791055774fd59c902 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 18:31:16 +0100 Subject: [PATCH 68/89] fix block response decoding --- ethcore/light/src/net/mod.rs | 2 +- ethcore/light/src/on_demand/mod.rs | 8 ++++++-- ethcore/light/src/types/request/mod.rs | 23 +++++++++++++++++------ rpc/src/v1/impls/light/eth.rs | 1 - 4 files changed, 24 insertions(+), 10 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 56b078b3b..4df83bf1c 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -407,7 +407,7 @@ impl LightProtocol { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response from peer {}", peer); + trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer); let peers = self.peers.read(); let res = match peers.get(peer) { diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index bc1ba4fb7..279d7e2ac 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -37,7 +37,7 @@ use rlp::RlpStream; use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; +use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; @@ -303,17 +303,21 @@ impl OnDemand { let complete = builder.build(); + let kind = complete.requests()[0].kind(); for (id, peer) in self.peers.read().iter() { if !peer.can_handle(&pending) { continue } match ctx.request_from(*id, complete.clone()) { Ok(req_id) => { - trace!(target: "on_demand", "Assigning request to peer {}", id); + trace!(target: "on_demand", "{}: Assigned {:?} to peer {}", + req_id, kind, id); + self.pending_requests.write().insert( req_id, pending, ); return } + Err(net::Error::NoCredits) => {} Err(e) => trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 4c55cdfaf..aab892270 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -244,7 +244,8 @@ pub enum CompleteRequest { } impl Request { - fn kind(&self) -> Kind { + /// Get the request kind. + pub fn kind(&self) -> Kind { match *self { Request::Headers(_) => Kind::Headers, Request::HeaderProof(_) => Kind::HeaderProof, @@ -727,7 +728,6 @@ pub mod header_proof { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { proof: rlp.list_at(0)?, hash: rlp.val_at(1)?, @@ -825,7 +825,6 @@ pub mod block_receipts { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { receipts: rlp.as_list()?, }) @@ -922,8 +921,8 @@ pub mod block_body { use ethcore::transaction::UnverifiedTransaction; // check body validity. 
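Patch 67 above settles on append_list for the proof fields, so a proof travels as a nested list of byte strings inside the response list. For readers less used to RLP framing, here is a from-scratch toy encoder covering only the short-payload cases (deliberately not the in-tree rlp crate) that shows what that nesting looks like on the wire:

    /// Toy RLP encoding of a byte string, short (<= 55 byte) payloads only.
    fn encode_str(data: &[u8]) -> Vec<u8> {
        match data {
            // a single byte below 0x80 is its own encoding
            [b] if *b < 0x80 => vec![*b],
            _ => {
                assert!(data.len() <= 55, "long strings omitted for brevity");
                let mut out = vec![0x80 + data.len() as u8];
                out.extend_from_slice(data);
                out
            }
        }
    }

    /// Toy RLP encoding of a list: concatenated item encodings behind a 0xc0 + len tag.
    fn encode_list(items: &[Vec<u8>]) -> Vec<u8> {
        let payload: Vec<u8> = items.iter().flat_map(|item| encode_str(item)).collect();
        assert!(payload.len() <= 55, "long lists omitted for brevity");
        let mut out = vec![0xc0 + payload.len() as u8];
        out.extend(payload);
        out
    }

    fn main() {
        // A proof is a *list of byte strings*: each inner item keeps its own
        // string header inside the list, it is not spliced into the outer list.
        let proof = vec![vec![1u8, 2, 3], vec![4u8, 5, 6]];
        assert_eq!(encode_list(&proof), vec![0xc8, 0x83, 1, 2, 3, 0x83, 4, 5, 6]);
    }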
- let _: Vec = rlp.list_at(0)?; - let _: Vec = rlp.list_at(1)?; + let _: Vec = rlp.list_at(0)?; + let _: Vec = rlp.list_at(1)?; Ok(Response { body: encoded::Body::new(rlp.as_raw().to_owned()), @@ -1480,9 +1479,16 @@ mod tests { fn check_roundtrip(val: T) where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + ::std::fmt::Debug { + // check as single value. let bytes = ::rlp::encode(&val); let new_val: T = ::rlp::decode(&bytes); assert_eq!(val, new_val); + + // check as list containing single value. + let list = [val]; + let bytes = ::rlp::encode_list(&list); + let new_list: Vec = ::rlp::decode_list(&bytes); + assert_eq!(&list, &new_list[..]); } #[test] @@ -1566,6 +1572,7 @@ mod tests { #[test] fn body_roundtrip() { + use ethcore::transaction::{Transaction, UnverifiedTransaction}; let req = IncompleteBodyRequest { hash: Field::Scalar(Default::default()), }; @@ -1573,8 +1580,12 @@ mod tests { let full_req = Request::Body(req.clone()); let res = BodyResponse { body: { + let header = ::ethcore::header::Header::default(); + let tx = UnverifiedTransaction::from(Transaction::default().fake_sign(Default::default())); let mut stream = RlpStream::new_list(2); - stream.begin_list(0).begin_list(0); + stream.begin_list(2).append(&tx).append(&tx) + .begin_list(1).append(&header); + ::ethcore::encoded::Body::new(stream.out()) }, }; diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index ba32bf35c..1851f479e 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -166,7 +166,6 @@ impl EthClient { fn proved_execution(&self, req: CallRequest, num: Trailing) -> BoxFuture { const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]); - let (sync, on_demand, client) = (self.sync.clone(), self.on_demand.clone(), self.client.clone()); let req: CRequest = req.into(); let id = num.0.into(); From 45c0a971426f83da7dc0a9772e426b13f147f69a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 18:49:26 +0100 Subject: [PATCH 69/89] fix body encoding --- ethcore/light/src/on_demand/mod.rs | 4 +++- ethcore/light/src/on_demand/request.rs | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 279d7e2ac..a7c1ba2c4 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -235,8 +235,10 @@ impl OnDemand { match { self.cache.lock().block_body(&req.hash) } { Some(body) => { let mut stream = RlpStream::new_list(3); + let body = body.rlp(); stream.append_raw(&req.header.into_inner(), 1); - stream.append_raw(&body.into_inner(), 2); + stream.append_raw(&body.at(0).as_raw(), 1); + stream.append_raw(&body.at(1).as_raw(), 1); sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index cda1d6feb..30337cc2c 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -151,7 +151,8 @@ impl Body { // concatenate the header and the body. 
let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(&body.rlp().as_raw(), 2); + stream.append_raw(body.rlp().at(0).as_raw(), 1); + stream.append_raw(body.rlp().at(1).as_raw(), 1); Ok(encoded::Block::new(stream.out())) } From b76860fd2b700091c85d18bb9a5a68339fe0440c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 19:42:11 +0100 Subject: [PATCH 70/89] add signing RPC methods in light cli --- parity/rpc_apis.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 76e54ff81..c53e2c4a1 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -313,6 +313,20 @@ impl Dependencies for LightDependencies { self.transaction_queue.clone(), ); + macro_rules! add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = dispatcher.clone(); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) + } + } + } + } + for api in apis { match *api { Api::Web3 => { @@ -332,7 +346,9 @@ impl Dependencies for LightDependencies { ); handler.extend_with(client.to_delegate()); - // TODO: filters and signing methods. + // TODO: filters. + add_signing_methods!(EthSigning, handler, self); + }, Api::Personal => { handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); @@ -355,9 +371,8 @@ impl Dependencies for LightDependencies { self.dapps_port, ).to_delegate()); - // TODO - //add_signing_methods!(EthSigning, handler, self); - //add_signing_methods!(ParitySigning, handler, self); + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); }, Api::ParityAccounts => { handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); From e0a79699eaa08f7a434f906b2ae262caa6122d2d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 20:02:46 +0100 Subject: [PATCH 71/89] transaction propagation on a timer --- ethcore/light/src/net/mod.rs | 51 +++++++++++++++++++++++++++++++++++- parity/rpc_apis.rs | 1 - sync/src/api.rs | 2 +- 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 4df83bf1c..e32e92145 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -27,7 +27,7 @@ use util::hash::H256; use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fmt; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -61,6 +61,9 @@ const TIMEOUT_INTERVAL_MS: u64 = 1000; const TICK_TIMEOUT: TimerToken = 1; const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000; +const PROPAGATE_TIMEOUT: TimerToken = 2; +const PROPAGATE_TIMEOUT_INTERVAL_MS: u64 = 5000; + // minimum interval between updates. const UPDATE_INTERVAL_MS: i64 = 5000; @@ -132,6 +135,7 @@ pub struct Peer { last_update: SteadyTime, pending_requests: RequestSet, failed_requests: Vec, + propagated_transactions: HashSet, } /// A light protocol event handler. @@ -499,6 +503,47 @@ impl LightProtocol { } } + // propagate transactions to relay peers. 
+ // if we aren't on the mainnet, we just propagate to all relay peers + fn propagate_transactions(&self, io: &IoContext) { + if self.capabilities.read().tx_relay { return } + + let ready_transactions = self.provider.ready_transactions(); + if ready_transactions.is_empty() { return } + + trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len()); + + let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect(); + let mut buf = Vec::new(); + + let peers = self.peers.read(); + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); + if !peer_info.capabilities.tx_relay { continue } + + let prop_filter = &mut peer_info.propagated_transactions; + *prop_filter = &*prop_filter & &all_transaction_hashes; + + // fill the buffer with all non-propagated transactions. + let to_propagate = ready_transactions.iter() + .filter(|tx| prop_filter.insert(tx.hash())) + .map(|tx| &tx.transaction); + + buf.extend(to_propagate); + + // propagate to the given peer. + if buf.is_empty() { continue } + io.send(*peer_id, packet::SEND_TRANSACTIONS, { + let mut stream = RlpStream::new_list(buf.len()); + for pending_tx in buf.drain(..) { + stream.append(pending_tx); + } + + stream.out() + }) + } + } + /// called when a peer connects. pub fn on_connect(&self, peer: &PeerId, io: &IoContext) { let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) { @@ -613,6 +658,7 @@ impl LightProtocol { last_update: pending.last_update, pending_requests: RequestSet::default(), failed_requests: Vec::new(), + propagated_transactions: HashSet::new(), })); for handler in &self.handlers { @@ -797,6 +843,8 @@ impl NetworkProtocolHandler for LightProtocol { .expect("Error registering sync timer."); io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS) .expect("Error registering sync timer."); + io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL_MS) + .expect("Error registering sync timer."); } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -815,6 +863,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), + PROPAGATE_TIMEOUT => self.propagate_transactions(io), _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index c53e2c4a1..5cfb28474 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -348,7 +348,6 @@ impl Dependencies for LightDependencies { // TODO: filters. 
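The propagation timer added above keeps, for every peer, the set of transaction hashes already relayed to it, intersects that set with the hashes currently ready (so it cannot grow without bound), and then sends only what the peer has not yet seen. A stripped-down, std-only sketch of that filter, with u64 values standing in for transaction hashes:

    use std::collections::HashSet;

    /// Hypothetical per-peer relay state: hashes we already sent to this peer.
    #[derive(Default)]
    struct PeerRelay {
        propagated: HashSet<u64>,
    }

    impl PeerRelay {
        /// Given the currently ready hashes, return the ones this peer still
        /// needs and remember them as propagated.
        fn to_send(&mut self, ready: &HashSet<u64>) -> Vec<u64> {
            // Prune hashes that are no longer ready.
            let still_relevant: HashSet<u64> = &self.propagated & ready;
            self.propagated = still_relevant;

            // Everything newly inserted here has not been sent to this peer before.
            ready.iter()
                .filter(|hash| self.propagated.insert(**hash))
                .cloned()
                .collect()
        }
    }

    fn main() {
        let mut peer = PeerRelay::default();

        let ready: HashSet<u64> = [1, 2, 3].iter().cloned().collect();
        let mut first = peer.to_send(&ready);
        first.sort();
        assert_eq!(first, vec![1, 2, 3]);

        // Nothing new on the second pass: nothing is re-sent.
        assert!(peer.to_send(&ready).is_empty());

        // When one transaction leaves the pool and a new one arrives,
        // only the new one goes out.
        let ready: HashSet<u64> = [2, 3, 4].iter().cloned().collect();
        assert_eq!(peer.to_send(&ready), vec![4]);
    }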
add_signing_methods!(EthSigning, handler, self); - }, Api::Personal => { handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); diff --git a/sync/src/api.rs b/sync/src/api.rs index bfa33e7b8..3e3234d84 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -414,7 +414,7 @@ struct TxRelay(Arc); impl LightHandler for TxRelay { fn on_transactions(&self, ctx: &EventContext, relay: &[::ethcore::transaction::UnverifiedTransaction]) { - trace!(target: "les", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); + trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx).to_vec()).collect(), ctx.peer()) } } From 3708b3be63566fc0d210cbd687e2a561633e1f08 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 20:49:17 +0100 Subject: [PATCH 72/89] fix RPC tests --- rpc/src/v1/tests/mocked/parity.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index 3af627037..28ff8ff5c 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -302,7 +302,7 @@ fn rpc_parity_net_peers() { let io = deps.default_client(); let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"les":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"les":null}}]},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } From a78068cbe966f729b85c1ad0cdedac6ec516a912 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 22:20:00 +0100 Subject: [PATCH 73/89] queue culling and informant --- Cargo.lock | 1 + Cargo.toml | 1 + ethcore/light/src/client/service.rs | 9 ++- parity/light_helpers/mod.rs | 21 ++++++ parity/light_helpers/queue_cull.rs | 99 +++++++++++++++++++++++++++++ parity/main.rs | 2 + parity/run.rs | 25 +++++++- 7 files changed, 154 insertions(+), 4 deletions(-) create mode 100644 parity/light_helpers/mod.rs create mode 100644 parity/light_helpers/queue_cull.rs diff --git a/Cargo.lock b/Cargo.lock index d572dcf79..7e0302e14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,7 @@ dependencies = [ "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", diff --git a/Cargo.toml b/Cargo.toml index 8420c5459..66c8674df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ toml = "0.2" serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" +futures = "0.1" fdlimit = "0.1" ws2_32-sys = "0.2" hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 89538fec2..55795d870 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -50,7 +50,7 @@ impl fmt::Display for Error { /// Light client service. pub struct Service { client: Arc, - _io_service: IoService, + io_service: IoService, } impl Service { @@ -82,10 +82,15 @@ impl Service { io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { client: client, - _io_service: io_service, + io_service: io_service, }) } + /// Register an I/O handler on the service. + pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { + self.io_service.register_handler(handler) + } + /// Get a handle to the client. pub fn client(&self) -> &Arc { &self.client diff --git a/parity/light_helpers/mod.rs b/parity/light_helpers/mod.rs new file mode 100644 index 000000000..488f970c2 --- /dev/null +++ b/parity/light_helpers/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Utilities and helpers for the light client. + +mod queue_cull; + +pub use self::queue_cull::QueueCull; diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs new file mode 100644 index 000000000..10865d485 --- /dev/null +++ b/parity/light_helpers/queue_cull.rs @@ -0,0 +1,99 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Service for culling the light client's transaction queue. 
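The queue-cull service introduced here periodically fetches, for every sender with queued transactions, that account's current nonce, and asks the transaction queue to cull against it. As a rough, std-only illustration of the intended effect — assuming, as the helper's comments suggest, that cull(sender, nonce) drops queued entries whose nonce is below the account's current one — consider this toy queue:

    use std::collections::HashMap;

    /// Toy transaction queue: per sender, the nonces of queued transactions.
    /// Purely illustrative; not the light TransactionQueue type.
    #[derive(Default)]
    struct ToyQueue {
        queued: HashMap<&'static str, Vec<u64>>,
    }

    impl ToyQueue {
        /// Drop queued entries whose nonce is below the sender's current
        /// account nonce: they can only have been mined or replaced already.
        fn cull(&mut self, sender: &'static str, current_nonce: u64) {
            let now_empty = match self.queued.get_mut(sender) {
                Some(nonces) => {
                    nonces.retain(|&n| n >= current_nonce);
                    nonces.is_empty()
                }
                None => false,
            };
            if now_empty {
                self.queued.remove(sender);
            }
        }
    }

    fn main() {
        let mut queue = ToyQueue::default();
        queue.queued.insert("alice", vec![5, 6, 7]);

        // The on-demand lookup reports alice's account nonce as 7: transactions
        // with nonces 5 and 6 must already be in the chain, only 7 is still pending.
        queue.cull("alice", 7);
        assert_eq!(queue.queued.get("alice"), Some(&vec![7]));
    }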
+ +use std::sync::Arc; +use std::time::Duration; + +use ethcore::service::ClientIoMessage; +use ethsync::LightSync; +use io::{IoContext, IoHandler, TimerToken}; + +use light::client::Client; +use light::on_demand::{request, OnDemand}; +use light::TransactionQueue; + +use futures::{future, stream, Future, Stream}; + +use parity_reactor::Remote; + +use util::RwLock; + +// Attepmt to cull once every 10 minutes. +const TOKEN: TimerToken = 1; +const TIMEOUT_MS: u64 = 1000 * 60 * 10; + +// But make each attempt last only 9 minutes +const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9; + +/// Periodically culls the transaction queue of mined transactions. +pub struct QueueCull { + /// A handle to the client, for getting the latest block header. + pub client: Arc, + /// A handle to the sync service. + pub sync: Arc, + /// The on-demand request service. + pub on_demand: Arc, + /// The transaction queue. + pub txq: Arc>, + /// Event loop remote. + pub remote: Remote, +} + +impl IoHandler for QueueCull { + fn initialize(&self, io: &IoContext) { + io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer"); + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer != TOKEN { return } + + let senders = self.txq.read().queued_senders(); + if senders.is_empty() { return } + + let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); + let best_header = self.client.best_block_header(); + let start_nonce = self.client.engine().account_start_nonce(); + + info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); + self.remote.spawn_with_timeout(move || { + let maybe_fetching = sync.with_context(move |ctx| { + // fetch the nonce of each sender in the queue. + let nonce_futures = senders.iter() + .map(|&address| request::Account { header: best_header.clone(), address: address }) + .map(|request| on_demand.account(ctx, request)) + .map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce))) + .zip(senders.iter()) + .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); + + // as they come in, update each sender to the new nonce. + stream::futures_unordered(nonce_futures) + .fold(txq, |txq, (address, nonce)| { + txq.write().cull(address, nonce); + future::ok(txq) + }) + .map(|_| ()) // finally, discard the txq handle and log errors. + .map_err(|_| debug!(target: "cull", "OnDemand prematurely closed channel.")) + }); + + match maybe_fetching { + Some(fut) => fut.boxed(), + None => future::ok(()).boxed(), + } + }, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) + } +} diff --git a/parity/main.rs b/parity/main.rs index 2044b3ee0..cde0e6c1f 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -28,6 +28,7 @@ extern crate ctrlc; extern crate docopt; extern crate env_logger; extern crate fdlimit; +extern crate futures; extern crate hyper; extern crate isatty; extern crate jsonrpc_core; @@ -101,6 +102,7 @@ mod deprecated; mod dir; mod helpers; mod informant; +mod light_helpers; mod migration; mod modules; mod params; diff --git a/parity/run.rs b/parity/run.rs index b3d9bf90b..cf7d5e82c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -238,12 +238,24 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> }; let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; let light_sync = Arc::new(light_sync); - light_sync.start_network(); - // start RPCs. // spin up event loop let event_loop = EventLoop::spawn(); + // queue cull service. 
+ let queue_cull = Arc::new(::light_helpers::QueueCull { + client: service.client().clone(), + sync: light_sync.clone(), + on_demand: on_demand.clone(), + txq: txq.clone(), + remote: event_loop.remote(), + }); + + service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?; + + // start the network. + light_sync.start_network(); + // fetch service let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; @@ -253,6 +265,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> let rpc_stats = Arc::new(informant::RpcStats::default()); let signer_path = cmd.signer_conf.signer_path.clone(); + // start RPCs let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { signer_service: Arc::new(rpc_apis::SignerService::new(move || { signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) @@ -299,6 +312,14 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> // TODO: Dapps + // minimal informant thread. Just prints block number every 5 seconds. + // TODO: integrate with informant.rs + let informant_client = service.client().clone(); + ::std::thread::spawn(move || loop { + info!("#{}", informant_client.best_block_header().number()); + ::std::thread::sleep(::std::time::Duration::from_secs(5)); + }); + // wait for ctrl-c. Ok(wait_for_exit(panic_handler, None, None, can_restart)) } From ac057ebe935505a706f2aa733885e469dc2c656c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 23 Mar 2017 22:36:15 +0100 Subject: [PATCH 74/89] fix test build --- ethcore/light/src/net/tests/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 67dfe8131..6dc5fbe7e 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -612,6 +612,7 @@ fn id_guard() { last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, failed_requests: Vec::new(), + propagated_transactions: Default::default(), })); // first, malformed responses. From b840ab8f8be54c64e95a3ba767685200444063b9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 28 Mar 2017 16:23:09 +0200 Subject: [PATCH 75/89] Compact chunks --- ethcore/src/snapshot/account.rs | 101 +++++++++++++++++----------- ethcore/src/snapshot/mod.rs | 32 ++++----- ethcore/src/snapshot/tests/state.rs | 5 +- util/rlp/src/stream.rs | 67 ++++++++++++++++-- util/rlp/tests/tests.rs | 22 ++++++ 5 files changed, 161 insertions(+), 66 deletions(-) diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index dacd9ba52..23ba31e17 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -23,11 +23,10 @@ use snapshot::Error; use util::{U256, H256, Bytes, HashDB, SHA3_EMPTY, SHA3_NULL_RLP}; use util::trie::{TrieDB, Trie}; use rlp::{RlpStream, UntrustedRlp}; -use itertools::Itertools; use std::collections::HashSet; -// An empty account -- these are replaced with RLP null data for a space optimization. +// An empty account -- these were replaced with RLP null data for a space optimization in v1. const ACC_EMPTY: BasicAccount = BasicAccount { nonce: U256([0, 0, 0, 0]), balance: U256([0, 0, 0, 0]), @@ -62,28 +61,19 @@ impl CodeState { } // walk the account's storage trie, returning a vector of RLP items containing the -// account properties and the storage. 
Each item contains at most `max_storage_items` +// account address hash, account properties and the storage. Each item contains at most `max_storage_items` // storage records split according to snapshot format definition. -pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet, max_storage_items: usize) -> Result, Error> { - if acc == &ACC_EMPTY { - return Ok(vec![::rlp::NULL_RLP.to_vec()]); - } - +pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet, first_chunk_size: usize, max_chunk_size: usize) -> Result, Error> { let db = TrieDB::new(acct_db, &acc.storage_root)?; + let mut chunks = Vec::new(); + let mut db_iter = db.iter()?; + let mut target_chunk_size = first_chunk_size; + let mut account_stream = RlpStream::new_list(2); + let mut leftover: Option> = None; + loop { + account_stream.append(account_hash); + account_stream.begin_list(5); - let chunks = db.iter()?.chunks(max_storage_items); - let pair_chunks = chunks.into_iter().map(|chunk| chunk.collect()); - pair_chunks.pad_using(1, |_| Vec::new(), ).map(|pairs| { - let mut stream = RlpStream::new_list(pairs.len()); - - for r in pairs { - let (k, v) = r?; - stream.begin_list(2).append(&k).append(&&*v); - } - - let pairs_rlp = stream.out(); - - let mut account_stream = RlpStream::new_list(5); account_stream.append(&acc.nonce) .append(&acc.balance); @@ -105,9 +95,47 @@ pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut Hash } } - account_stream.append_raw(&pairs_rlp, 1); - Ok(account_stream.out()) - }).collect() + account_stream.begin_unbounded_list(); + if account_stream.len() > target_chunk_size { + // account does not fit, push an empty record to mark a new chunk + target_chunk_size = max_chunk_size; + chunks.push(Vec::new()); + } + + if let Some(pair) = leftover.take() { + account_stream.append_raw_checked(&pair, 1, target_chunk_size); + } + + loop { + match db_iter.next() { + Some(Ok((k, v))) => { + let pair = { + let mut stream = RlpStream::new_list(2); + stream.append(&k).append(&&*v); + stream.drain() + }; + if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { + account_stream.complete_unbounded_list(); + let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); + chunks.push(stream.out()); + target_chunk_size = max_chunk_size; + leftover = Some(pair.to_vec()); + break; + } + }, + Some(Err(e)) => { + return Err(e.into()); + }, + None => { + account_stream.complete_unbounded_list(); + let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2)); + chunks.push(stream.out()); + return Ok(chunks); + } + } + + } + } } // decode a fat rlp, and rebuild the storage trie as we go. 
@@ -181,7 +209,7 @@ mod tests { use snapshot::tests::helpers::fill_storage; use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP}; - use util::{Address, H256, HashDB, DBValue}; + use util::{Address, H256, HashDB, DBValue, Hashable}; use rlp::UntrustedRlp; use std::collections::HashSet; @@ -203,8 +231,8 @@ mod tests { let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap(); - let fat_rlp = UntrustedRlp::new(&fat_rlps[0]); + let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp = UntrustedRlp::new(&fat_rlps[0]).at(1).unwrap(); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); } @@ -228,8 +256,8 @@ mod tests { let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlp = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap(); - let fat_rlp = UntrustedRlp::new(&fat_rlp[0]); + let fat_rlp = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp = UntrustedRlp::new(&fat_rlp[0]).at(1).unwrap(); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); } @@ -253,11 +281,11 @@ mod tests { let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 100).unwrap(); + let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap(); let mut root = SHA3_NULL_RLP; let mut restored_account = None; for rlp in fat_rlps { - let fat_rlp = UntrustedRlp::new(&rlp); + let fat_rlp = UntrustedRlp::new(&rlp).at(1).unwrap(); restored_account = Some(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, root).unwrap().0); root = restored_account.as_ref().unwrap().storage_root.clone(); } @@ -297,12 +325,12 @@ mod tests { let mut used_code = HashSet::new(); - let fat_rlp1 = to_fat_rlps(&account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value()).unwrap(); - let fat_rlp2 = to_fat_rlps(&account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value()).unwrap(); + let fat_rlp1 = to_fat_rlps(&addr1.sha3(), &account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp2 = to_fat_rlps(&addr2.sha3(), &account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); assert_eq!(used_code.len(), 1); - let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]); - let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]); + let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]).at(1).unwrap(); + let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]).at(1).unwrap(); let (acc, maybe_code) = from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr2), fat_rlp2, H256::zero()).unwrap(); assert!(maybe_code.is_none()); @@ -316,9 +344,6 @@ mod tests { #[test] fn encoding_empty_acc() { let mut db = get_temp_state_db(); - let mut used_code = HashSet::new(); - - 
assert_eq!(to_fat_rlps(&ACC_EMPTY, &AccountDB::new(db.as_hashdb(), &Address::default()), &mut used_code, usize::max_value()).unwrap(), vec![::rlp::NULL_RLP.to_vec()]); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &Address::default()), UntrustedRlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None)); } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 97eceeab3..69dbc943d 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -83,9 +83,6 @@ mod traits { // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; -// Try to have chunks be around 4MB (before compression) -const MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD: usize = 80_000; - // How many blocks to include in a snapshot, starting from the head of the chain. const SNAPSHOT_BLOCKS: u64 = 30000; @@ -305,20 +302,9 @@ impl<'a> StateChunker<'a> { // // If the buffer is greater than the desired chunk size, // this will write out the data to disk. - fn push(&mut self, account_hash: Bytes, data: Bytes, force_chunk: bool) -> Result<(), Error> { - let pair = { - let mut stream = RlpStream::new_list(2); - stream.append(&account_hash).append_raw(&data, 1); - stream.out() - }; - - if force_chunk || self.cur_size + pair.len() >= PREFERRED_CHUNK_SIZE { - self.write_chunk()?; - } - - self.cur_size += pair.len(); - self.rlps.push(pair); - + fn push(&mut self, data: Bytes) -> Result<(), Error> { + self.cur_size += data.len(); + self.rlps.push(data); Ok(()) } @@ -348,6 +334,11 @@ impl<'a> StateChunker<'a> { Ok(()) } + + // Get current chunk size. + fn chunk_size(&self) -> usize { + self.cur_size + } } /// Walk the given state database starting from the given root, @@ -377,9 +368,12 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex 0)?; + if i > 0 { + chunker.write_chunk()?; + } + chunker.push(fat_rlp)?; } } diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index cbe7e172f..744b86577 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -122,10 +122,9 @@ fn get_code_from_prev_chunk() { let mut db = MemoryDB::new(); AccountDBMut::from_hash(&mut db, hash).insert(&code[..]); - let fat_rlp = account::to_fat_rlps(&acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value()).unwrap(); - + let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); let mut stream = RlpStream::new_list(1); - stream.begin_list(2).append(&hash).append_raw(&fat_rlp[0], 1); + stream.append_raw(&fat_rlp[0], 1); stream.out() }; diff --git a/util/rlp/src/stream.rs b/util/rlp/src/stream.rs index 318e019fc..11a16b859 100644 --- a/util/rlp/src/stream.rs +++ b/util/rlp/src/stream.rs @@ -23,11 +23,11 @@ use traits::Encodable; struct ListInfo { position: usize, current: usize, - max: usize, + max: Option, } impl ListInfo { - fn new(position: usize, max: usize) -> ListInfo { + fn new(position: usize, max: Option) -> ListInfo { ListInfo { position: position, current: 0, @@ -133,7 +133,7 @@ impl RlpStream { self.buffer.push(0); let position = self.buffer.len(); - self.unfinished_lists.push(ListInfo::new(position, len)); + self.unfinished_lists.push(ListInfo::new(position, Some(len))); }, } @@ -141,6 +141,19 @@ impl RlpStream { self } + + /// Declare appending the list of unknown size, chainable. 
+ pub fn begin_unbounded_list(&mut self) -> &mut RlpStream { + self.finished_list = false; + // payload is longer than 1 byte only for lists > 55 bytes + // by pushing always this 1 byte we may avoid unnecessary shift of data + self.buffer.push(0); + let position = self.buffer.len(); + self.unfinished_lists.push(ListInfo::new(position, None)); + // return chainable self + self + } + /// Apends null to the end of stream, chainable. /// /// ```rust @@ -177,6 +190,36 @@ impl RlpStream { self } + /// Appends raw (pre-serialised) RLP data. Checks for size oveflow. + pub fn append_raw_checked<'a>(&'a mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool { + if self.estimate_size(bytes.len()) > max_size { + return false; + } + self.append_raw(bytes, item_count); + true + } + + /// Calculate total RLP size for appended payload. + pub fn estimate_size<'a>(&'a self, add: usize) -> usize { + let total_size = self.buffer.len() + add; + let mut base_size = total_size; + for list in &self.unfinished_lists[..] { + let len = total_size - list.position; + if len > 55 { + let leading_empty_bytes = (len as u64).leading_zeros() as usize / 8; + let size_bytes = 8 - leading_empty_bytes; + base_size += size_bytes; + } + } + base_size + } + + + /// Returns current RLP size in bytes for the data pushed into the list. + pub fn len<'a>(&'a self) -> usize { + self.estimate_size(0) + } + /// Clear the output stream so far. /// /// ```rust @@ -246,10 +289,11 @@ impl RlpStream { None => false, Some(ref mut x) => { x.current += inserted_items; - if x.current > x.max { - panic!("You cannot append more items then you expect!"); + match x.max { + Some(ref max) if x.current > *max => panic!("You cannot append more items then you expect!"), + Some(ref max) => x.current == *max, + _ => false, } - x.current == x.max } }; @@ -273,6 +317,17 @@ impl RlpStream { false => panic!() } } + + /// Finalize current ubnbound list. Panics if no unbounded list has been opened. + pub fn complete_unbounded_list(&mut self) { + let list = self.unfinished_lists.pop().expect("No open list."); + if list.max.is_some() { + panic!("List type mismatch."); + } + let len = self.buffer.len() - list.position; + self.encoder().insert_list_payload(len, list.position); + self.note_appended(1); + } } pub struct BasicEncoder<'a> { diff --git a/util/rlp/tests/tests.rs b/util/rlp/tests/tests.rs index 1c996caac..d207034ce 100644 --- a/util/rlp/tests/tests.rs +++ b/util/rlp/tests/tests.rs @@ -412,3 +412,25 @@ fn test_rlp_list_length_overflow() { let as_val: Result = rlp.val_at(0); assert_eq!(Err(DecoderError::RlpIsTooShort), as_val); } + +#[test] +fn test_rlp_stream_size_limit() { + for limit in 40 .. 
270 { + let item = [0u8; 1]; + let mut stream = RlpStream::new(); + while stream.append_raw_checked(&item, 1, limit) {} + assert_eq!(stream.drain().len(), limit); + } +} + +#[test] +fn test_rlp_stream_unbounded_list() { + let mut stream = RlpStream::new(); + stream.begin_unbounded_list(); + stream.append(&40u32); + stream.append(&41u32); + assert!(!stream.is_finished()); + stream.complete_unbounded_list(); + assert!(stream.is_finished()); +} + From 0f65779d29814e0275d49036a2855562f92b89a0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Apr 2017 12:54:27 +0200 Subject: [PATCH 76/89] fix indentation in usage.txt --- parity/cli/usage.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index f970887f7..1e5f3c0fb 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -72,9 +72,9 @@ Operating Options: --identity NAME Specify your node's name. (default: {flag_identity}) --light Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary - data on-demand from the network. Much lower in storage, - potentially higher in bandwidth. Has no effect with - subcommands (default: {flag_light}) + data on-demand from the network. Much lower in storage, + potentially higher in bandwidth. Has no effect with + subcommands (default: {flag_light}). Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. From 50e0221dd147b64e0c7ac7e50c1c5611ce1a3b2a Mon Sep 17 00:00:00 2001 From: maciejhirsz Date: Mon, 3 Apr 2017 18:50:11 +0200 Subject: [PATCH 77/89] Perf and fixes --- js/package.json | 2 +- js/src/api/local/accounts/account.js | 40 +++--- js/src/api/local/accounts/accounts.js | 58 +++++--- js/src/api/local/ethkey/index.js | 39 +++--- js/src/api/local/ethkey/worker.js | 129 ++++++++++++------ js/src/api/local/ethkey/workerPool.js | 61 +++++++++ js/src/api/local/middleware.js | 81 +++++++---- js/src/api/transport/jsonRpcBase.js | 12 +- .../CreateAccount/NewAccount/newAccount.js | 13 +- .../NewAccount/newAccount.spec.js | 9 +- js/src/modals/CreateAccount/store.js | 6 +- js/src/modals/CreateAccount/store.spec.js | 3 + 12 files changed, 324 insertions(+), 129 deletions(-) create mode 100644 js/src/api/local/ethkey/workerPool.js diff --git a/js/package.json b/js/package.json index 82807e820..0585a1681 100644 --- a/js/package.json +++ b/js/package.json @@ -176,7 +176,7 @@ "geopattern": "1.2.3", "isomorphic-fetch": "2.2.1", "js-sha3": "0.5.5", - "keythereum": "0.4.3", + "keythereum": "0.4.6", "lodash": "4.17.2", "loglevel": "1.4.1", "marked": "0.3.6", diff --git a/js/src/api/local/accounts/account.js b/js/src/api/local/accounts/account.js index da9de1359..94e923f45 100644 --- a/js/src/api/local/accounts/account.js +++ b/js/src/api/local/accounts/account.js @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
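The estimate_size helper added to RlpStream a little further up accounts for list headers growing once a pending payload exceeds 55 bytes. The arithmetic below is lifted from that patch and wrapped so it can be checked in isolation; it answers how many extra length bytes a list header needs on top of its tag byte for a given payload length:

    /// Extra header bytes an RLP list needs beyond the single tag byte:
    /// payloads over 55 bytes carry their length as big-endian bytes after
    /// the tag (the same rule `estimate_size` applies per unfinished list).
    fn extra_header_bytes(payload_len: usize) -> usize {
        if payload_len <= 55 {
            0
        } else {
            let leading_empty_bytes = (payload_len as u64).leading_zeros() as usize / 8;
            8 - leading_empty_bytes
        }
    }

    fn main() {
        assert_eq!(extra_header_bytes(55), 0);     // short list: just the 0xc0 + len tag
        assert_eq!(extra_header_bytes(56), 1);     // one length byte after the 0xf7 + 1 tag
        assert_eq!(extra_header_bytes(255), 1);
        assert_eq!(extra_header_bytes(256), 2);    // 0x0100 needs two length bytes
        assert_eq!(extra_header_bytes(70_000), 3); // 0x011170 needs three
    }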
-import { keythereum } from '../ethkey'; +import { createKeyObject, decryptPrivateKey } from '../ethkey'; export default class Account { constructor (persist, data) { @@ -31,12 +31,14 @@ export default class Account { } isValidPassword (password) { - try { - keythereum.recover(Buffer.from(password), this._keyObject); - return true; - } catch (e) { - return false; - } + return decryptPrivateKey(this._keyObject, password) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + return true; + }); } get address () { @@ -68,21 +70,23 @@ export default class Account { } decryptPrivateKey (password) { - return keythereum.recover(Buffer.from(password), this._keyObject); + return decryptPrivateKey(this._keyObject, password); + } + + changePassword (key, password) { + return createKeyObject(key, password).then((keyObject) => { + this._keyObject = keyObject; + + this._persist(); + }); } static fromPrivateKey (persist, key, password) { - const iv = keythereum.crypto.randomBytes(16); - const salt = keythereum.crypto.randomBytes(32); + return createKeyObject(key, password).then((keyObject) => { + const account = new Account(persist, { keyObject }); - // Keythereum will fail if `password` is an empty string - password = Buffer.from(password); - - const keyObject = keythereum.dump(password, key, salt, iv); - - const account = new Account(persist, { keyObject }); - - return account; + return account; + }); } toJSON () { diff --git a/js/src/api/local/accounts/accounts.js b/js/src/api/local/accounts/accounts.js index 576addcb1..1bce1329a 100644 --- a/js/src/api/local/accounts/accounts.js +++ b/js/src/api/local/accounts/accounts.js @@ -38,14 +38,22 @@ export default class Accounts { create (secret, password) { const privateKey = Buffer.from(secret.slice(2), 'hex'); - const account = Account.fromPrivateKey(this.persist, privateKey, password); - this._store.push(account); - this.lastAddress = account.address; + return Account.fromPrivateKey(this.persist, privateKey, password) + .then((account) => { + const { address } = account; - this.persist(); + if (this._store.find((account) => account.address === address)) { + throw new Error(`Account ${address} already exists!`); + } - return account.address; + this._store.push(account); + this.lastAddress = address; + + this.persist(); + + return account.address; + }); } set lastAddress (value) { @@ -73,28 +81,40 @@ export default class Accounts { remove (address, password) { address = address.toLowerCase(); + const account = this.get(address); + + if (!account) { + return false; + } + + return account.isValidPassword(password) + .then((isValid) => { + if (!isValid) { + return false; + } + + if (address === this.lastAddress) { + this.lastAddress = NULL_ADDRESS; + } + + this.removeUnsafe(address); + + return true; + }); + } + + removeUnsafe (address) { + address = address.toLowerCase(); + const index = this._store.findIndex((account) => account.address === address); if (index === -1) { - return false; - } - - const account = this._store[index]; - - if (!account.isValidPassword(password)) { - console.log('invalid password'); - return false; - } - - if (address === this.lastAddress) { - this.lastAddress = NULL_ADDRESS; + return; } this._store.splice(index, 1); this.persist(); - - return true; } mapArray (mapper) { diff --git a/js/src/api/local/ethkey/index.js b/js/src/api/local/ethkey/index.js index ac2efa72e..4539c8c50 100644 --- a/js/src/api/local/ethkey/index.js +++ b/js/src/api/local/ethkey/index.js @@ -14,31 +14,34 @@ // You should have received a copy 
of the GNU General Public License // along with Parity. If not, see . -// Allow a web worker in the browser, with a fallback for Node.js -const hasWebWorkers = typeof Worker !== 'undefined'; -const KeyWorker = hasWebWorkers ? require('worker-loader!./worker') - : require('./worker').KeyWorker; +import workerPool from './workerPool'; -// Local accounts should never be used outside of the browser -export let keythereum = null; +export function createKeyObject (key, password) { + return workerPool.getWorker().action('createKeyObject', { key, password }) + .then((obj) => JSON.parse(obj)); +} -if (hasWebWorkers) { - require('keythereum/dist/keythereum'); +export function decryptPrivateKey (keyObject, password) { + return workerPool.getWorker() + .action('decryptPrivateKey', { keyObject, password }) + .then((privateKey) => { + if (privateKey) { + return Buffer.from(privateKey); + } - keythereum = window.keythereum; + return null; + }); } export function phraseToAddress (phrase) { - return phraseToWallet(phrase).then((wallet) => wallet.address); + return phraseToWallet(phrase) + .then((wallet) => wallet.address); } export function phraseToWallet (phrase) { - return new Promise((resolve, reject) => { - const worker = new KeyWorker(); - - worker.postMessage(phrase); - worker.onmessage = ({ data }) => { - resolve(data); - }; - }); + return workerPool.getWorker().action('phraseToWallet', phrase); +} + +export function verifySecret (secret) { + return workerPool.getWorker().action('verifySecret', secret); } diff --git a/js/src/api/local/ethkey/worker.js b/js/src/api/local/ethkey/worker.js index a472ee29a..3a0c34f7d 100644 --- a/js/src/api/local/ethkey/worker.js +++ b/js/src/api/local/ethkey/worker.js @@ -14,58 +14,107 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { keccak_256 as keccak256 } from 'js-sha3'; import secp256k1 from 'secp256k1/js'; +import { keccak_256 as keccak256 } from 'js-sha3'; + +const isWorker = typeof self !== 'undefined'; // Stay compatible between environments -if (typeof self !== 'object') { +if (!isWorker) { const scope = typeof global === 'undefined' ? 
window : global; scope.self = scope; } +// keythereum should never be used outside of the browser +let keythereum = null; + +if (isWorker) { + require('keythereum/dist/keythereum'); + + keythereum = self.keythereum; +} + +function route ({ action, payload }) { + if (action in actions) { + return actions[action](payload); + } + + return null; +} + +const actions = { + phraseToWallet (phrase) { + let secret = keccak256.array(phrase); + + for (let i = 0; i < 16384; i++) { + secret = keccak256.array(secret); + } + + while (true) { + secret = keccak256.array(secret); + + const secretBuf = Buffer.from(secret); + + if (secp256k1.privateKeyVerify(secretBuf)) { + // No compression, slice out last 64 bytes + const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); + const address = keccak256.array(publicBuf).slice(12); + + if (address[0] !== 0) { + continue; + } + + const wallet = { + secret: bytesToHex(secretBuf), + public: bytesToHex(publicBuf), + address: bytesToHex(address) + }; + + return wallet; + } + } + }, + + verifySecret (secret) { + const key = Buffer.from(secret.slice(2), 'hex'); + + return secp256k1.privateKeyVerify(key); + }, + + createKeyObject ({ key, password }) { + key = Buffer.from(key); + password = Buffer.from(password); + + const iv = keythereum.crypto.randomBytes(16); + const salt = keythereum.crypto.randomBytes(32); + const keyObject = keythereum.dump(password, key, salt, iv); + + return JSON.stringify(keyObject); + }, + + decryptPrivateKey ({ keyObject, password }) { + password = Buffer.from(password); + + try { + const key = keythereum.recover(password, keyObject); + + // Convert to array to safely send from the worker + return Array.from(key); + } catch (e) { + return null; + } + } +}; + function bytesToHex (bytes) { return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); } -// Logic ported from /ethkey/src/brain.rs -function phraseToWallet (phrase) { - let secret = keccak256.array(phrase); - - for (let i = 0; i < 16384; i++) { - secret = keccak256.array(secret); - } - - while (true) { - secret = keccak256.array(secret); - - const secretBuf = Buffer.from(secret); - - if (secp256k1.privateKeyVerify(secretBuf)) { - // No compression, slice out last 64 bytes - const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); - const address = keccak256.array(publicBuf).slice(12); - - if (address[0] !== 0) { - continue; - } - - const wallet = { - secret: bytesToHex(secretBuf), - public: bytesToHex(publicBuf), - address: bytesToHex(address) - }; - - return wallet; - } - } -} - self.onmessage = function ({ data }) { - const wallet = phraseToWallet(data); + const result = route(data); - postMessage(wallet); - close(); + postMessage(result); }; // Emulate a web worker in Node.js @@ -73,9 +122,9 @@ class KeyWorker { postMessage (data) { // Force async setTimeout(() => { - const wallet = phraseToWallet(data); + const result = route(data); - this.onmessage({ data: wallet }); + this.onmessage({ data: result }); }, 0); } diff --git a/js/src/api/local/ethkey/workerPool.js b/js/src/api/local/ethkey/workerPool.js new file mode 100644 index 000000000..ff5315898 --- /dev/null +++ b/js/src/api/local/ethkey/workerPool.js @@ -0,0 +1,61 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +// Allow a web worker in the browser, with a fallback for Node.js +const hasWebWorkers = typeof Worker !== 'undefined'; +const KeyWorker = hasWebWorkers ? require('worker-loader!./worker') + : require('./worker').KeyWorker; + +class WorkerContainer { + busy = false; + _worker = new KeyWorker(); + + action (action, payload) { + if (this.busy) { + throw new Error('Cannot issue an action on a busy worker!'); + } + + this.busy = true; + + return new Promise((resolve, reject) => { + this._worker.postMessage({ action, payload }); + this._worker.onmessage = ({ data }) => { + this.busy = false; + resolve(data); + }; + }); + } +} + +class WorkerPool { + pool = []; + + getWorker () { + let container = this.pool.find((container) => !container.busy); + + if (container) { + return container; + } + + container = new WorkerContainer(); + + this.pool.push(container); + + return container; + } +} + +export default new WorkerPool(); diff --git a/js/src/api/local/middleware.js b/js/src/api/local/middleware.js index d5997c60a..ece3fa17d 100644 --- a/js/src/api/local/middleware.js +++ b/js/src/api/local/middleware.js @@ -19,7 +19,7 @@ import accounts from './accounts'; import transactions from './transactions'; import { Middleware } from '../transport'; import { inNumber16 } from '../format/input'; -import { phraseToWallet, phraseToAddress } from './ethkey'; +import { phraseToWallet, phraseToAddress, verifySecret } from './ethkey'; import { randomPhrase } from '@parity/wordlist'; export default class LocalAccountsMiddleware extends Middleware { @@ -57,6 +57,21 @@ export default class LocalAccountsMiddleware extends Middleware { }); }); + register('parity_changePassword', ([address, oldPassword, newPassword]) => { + const account = accounts.get(address); + + return account.decryptPrivateKey(oldPassword) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + account.changePassword(privateKey, newPassword); + + return true; + }); + }); + register('parity_checkRequest', ([id]) => { return transactions.hash(id) || Promise.resolve(null); }); @@ -84,6 +99,17 @@ export default class LocalAccountsMiddleware extends Middleware { }); }); + register('parity_newAccountFromSecret', ([secret, password]) => { + return verifySecret(secret) + .then((isValid) => { + if (!isValid) { + throw new Error('Invalid secret key'); + } + + return accounts.create(secret, password); + }); + }); + register('parity_setAccountMeta', ([address, meta]) => { accounts.get(address).meta = meta; @@ -127,6 +153,12 @@ export default class LocalAccountsMiddleware extends Middleware { return accounts.remove(address, password); }); + register('parity_testPassword', ([address, password]) => { + const account = accounts.get(address); + + return account.isValidPassword(password); + }); + register('signer_confirmRequest', ([id, modify, password]) => { const { gasPrice, @@ -137,30 +169,33 @@ export default class LocalAccountsMiddleware extends 
Middleware { data } = Object.assign(transactions.get(id), modify); - return this - .rpcRequest('parity_nextNonce', [from]) - .then((nonce) => { - const tx = new EthereumTx({ - nonce, - to, - data, - gasLimit: inNumber16(gasLimit), - gasPrice: inNumber16(gasPrice), - value: inNumber16(value) - }); - const account = accounts.get(from); + const account = accounts.get(from); - tx.sign(account.decryptPrivateKey(password)); - - const serializedTx = `0x${tx.serialize().toString('hex')}`; - - return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); - }) - .then((hash) => { - transactions.confirm(id, hash); - - return {}; + return Promise.all([ + this.rpcRequest('parity_nextNonce', [from]), + account.decryptPrivateKey(password) + ]) + .then(([nonce, privateKey]) => { + const tx = new EthereumTx({ + nonce, + to, + data, + gasLimit: inNumber16(gasLimit), + gasPrice: inNumber16(gasPrice), + value: inNumber16(value) }); + + tx.sign(privateKey); + + const serializedTx = `0x${tx.serialize().toString('hex')}`; + + return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); + }) + .then((hash) => { + transactions.confirm(id, hash); + + return {}; + }); }); register('signer_rejectRequest', ([id]) => { diff --git a/js/src/api/transport/jsonRpcBase.js b/js/src/api/transport/jsonRpcBase.js index 46df718a7..573204c3e 100644 --- a/js/src/api/transport/jsonRpcBase.js +++ b/js/src/api/transport/jsonRpcBase.js @@ -80,12 +80,16 @@ export default class JsonRpcBase extends EventEmitter { const res = middleware.handle(method, params); if (res != null) { - const result = this._wrapSuccessResult(res); - const json = this.encode(method, params); + // If `res` isn't a promise, we need to wrap it + return Promise.resolve(res) + .then((res) => { + const result = this._wrapSuccessResult(res); + const json = this.encode(method, params); - Logging.send(method, params, { json, result }); + Logging.send(method, params, { json, result }); - return res; + return res; + }); } } diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.js b/js/src/modals/CreateAccount/NewAccount/newAccount.js index 04f2f272a..9c6be9f6e 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.js @@ -23,6 +23,7 @@ import { RadioButton, RadioButtonGroup } from 'material-ui/RadioButton'; import { Form, Input, IdentityIcon } from '~/ui'; import PasswordStrength from '~/ui/Form/PasswordStrength'; import { RefreshIcon } from '~/ui/Icons'; +import Loading from '~/ui/Loading'; import ChangeVault from '../ChangeVault'; import styles from '../createAccount.css'; @@ -170,7 +171,9 @@ export default class CreateAccount extends Component { const { accounts } = this.state; if (!accounts) { - return null; + return ( + + ); } const identities = Object @@ -205,6 +208,14 @@ export default class CreateAccount extends Component { createIdentities = () => { const { createStore } = this.props; + this.setState({ + accounts: null, + selectedAddress: '' + }); + + createStore.setAddress(''); + createStore.setPhrase(''); + return createStore .createIdentities() .then((accounts) => { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js index 87c7ba3fc..d6d38779f 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js @@ -58,12 +58,13 @@ describe('modals/CreateAccount/NewAccount', () => { return instance.componentWillMount(); }); - it('creates 
initial accounts', () => { - expect(Object.keys(instance.state.accounts).length).to.equal(7); + it('resets the accounts', () => { + expect(instance.state.accounts).to.be.null; + // expect(Object.keys(instance.state.accounts).length).to.equal(7); }); - it('sets the initial selected value', () => { - expect(instance.state.selectedAddress).to.equal(Object.keys(instance.state.accounts)[0]); + it('resets the initial selected value', () => { + expect(instance.state.selectedAddress).to.equal(''); }); }); }); diff --git a/js/src/modals/CreateAccount/store.js b/js/src/modals/CreateAccount/store.js index 52dddac80..9bc60d9af 100644 --- a/js/src/modals/CreateAccount/store.js +++ b/js/src/modals/CreateAccount/store.js @@ -69,7 +69,7 @@ export default class Store { return !(this.nameError || this.walletFileError); case 'fromNew': - return !(this.nameError || this.passwordRepeatError); + return !(this.nameError || this.passwordRepeatError) && this.hasAddress; case 'fromPhrase': return !(this.nameError || this.passwordRepeatError); @@ -85,6 +85,10 @@ export default class Store { } } + @computed get hasAddress () { + return !!(this.address); + } + @computed get passwordRepeatError () { return this.password === this.passwordRepeat ? null diff --git a/js/src/modals/CreateAccount/store.spec.js b/js/src/modals/CreateAccount/store.spec.js index b02f013b6..9d7bc10a2 100644 --- a/js/src/modals/CreateAccount/store.spec.js +++ b/js/src/modals/CreateAccount/store.spec.js @@ -329,6 +329,7 @@ describe('modals/CreateAccount/Store', () => { describe('createType === fromNew', () => { beforeEach(() => { store.setCreateType('fromNew'); + store.setAddress('0x0000000000000000000000000000000000000000'); }); it('returns true on no errors', () => { @@ -337,11 +338,13 @@ describe('modals/CreateAccount/Store', () => { it('returns false on nameError', () => { store.setName(''); + expect(store.canCreate).to.be.false; }); it('returns false on passwordRepeatError', () => { store.setPassword('testing'); + expect(store.canCreate).to.be.false; }); }); From 94bfe116aa2b5908803b0f11ec2c11e79a32accd Mon Sep 17 00:00:00 2001 From: maciejhirsz Date: Tue, 4 Apr 2017 11:49:36 +0200 Subject: [PATCH 78/89] CR fixes --- js/src/api/local/accounts/accounts.js | 6 ++++-- js/src/api/local/ethkey/index.js | 3 ++- js/src/api/local/ethkey/worker.js | 5 +---- js/src/api/local/middleware.js | 3 ++- js/src/api/util/format.js | 2 +- js/src/modals/CreateAccount/NewAccount/newAccount.spec.js | 1 - 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/js/src/api/local/accounts/accounts.js b/js/src/api/local/accounts/accounts.js index 1bce1329a..e7e5cc397 100644 --- a/js/src/api/local/accounts/accounts.js +++ b/js/src/api/local/accounts/accounts.js @@ -39,7 +39,8 @@ export default class Accounts { create (secret, password) { const privateKey = Buffer.from(secret.slice(2), 'hex'); - return Account.fromPrivateKey(this.persist, privateKey, password) + return Account + .fromPrivateKey(this.persist, privateKey, password) .then((account) => { const { address } = account; @@ -87,7 +88,8 @@ export default class Accounts { return false; } - return account.isValidPassword(password) + return account + .isValidPassword(password) .then((isValid) => { if (!isValid) { return false; diff --git a/js/src/api/local/ethkey/index.js b/js/src/api/local/ethkey/index.js index 4539c8c50..a6967da25 100644 --- a/js/src/api/local/ethkey/index.js +++ b/js/src/api/local/ethkey/index.js @@ -22,7 +22,8 @@ export function createKeyObject (key, password) { } export function 
decryptPrivateKey (keyObject, password) { - return workerPool.getWorker() + return workerPool + .getWorker() .action('decryptPrivateKey', { keyObject, password }) .then((privateKey) => { if (privateKey) { diff --git a/js/src/api/local/ethkey/worker.js b/js/src/api/local/ethkey/worker.js index 3a0c34f7d..00f4a0bed 100644 --- a/js/src/api/local/ethkey/worker.js +++ b/js/src/api/local/ethkey/worker.js @@ -16,6 +16,7 @@ import secp256k1 from 'secp256k1/js'; import { keccak_256 as keccak256 } from 'js-sha3'; +import { bytesToHex } from '~/api/util/format'; const isWorker = typeof self !== 'undefined'; @@ -107,10 +108,6 @@ const actions = { } }; -function bytesToHex (bytes) { - return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); -} - self.onmessage = function ({ data }) { const result = route(data); diff --git a/js/src/api/local/middleware.js b/js/src/api/local/middleware.js index ece3fa17d..36a8cd2cf 100644 --- a/js/src/api/local/middleware.js +++ b/js/src/api/local/middleware.js @@ -60,7 +60,8 @@ export default class LocalAccountsMiddleware extends Middleware { register('parity_changePassword', ([address, oldPassword, newPassword]) => { const account = accounts.get(address); - return account.decryptPrivateKey(oldPassword) + return account + .decryptPrivateKey(oldPassword) .then((privateKey) => { if (!privateKey) { return false; diff --git a/js/src/api/util/format.js b/js/src/api/util/format.js index c7594b692..61fc9d32c 100644 --- a/js/src/api/util/format.js +++ b/js/src/api/util/format.js @@ -17,7 +17,7 @@ import { range } from 'lodash'; export function bytesToHex (bytes) { - return '0x' + bytes.map((b) => ('0' + b.toString(16)).slice(-2)).join(''); + return '0x' + Buffer.from(bytes).toString('hex'); } export function cleanupValue (value, type) { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js index d6d38779f..935fe5b80 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js @@ -60,7 +60,6 @@ describe('modals/CreateAccount/NewAccount', () => { it('resets the accounts', () => { expect(instance.state.accounts).to.be.null; - // expect(Object.keys(instance.state.accounts).length).to.equal(7); }); it('resets the initial selected value', () => { From 8d0fde6f608f7794e8c18b18e762f77741473013 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 5 Apr 2017 09:06:09 +0000 Subject: [PATCH 79/89] [ci skip] js-precompiled 20170405-090226 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f36aa132e..d50bf5c3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1755,7 +1755,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#04143247380a7a9bce112c9467636684d8214973" +source = "git+https://github.com/paritytech/js-precompiled.git#9bfc6f3dfca2c337c53084bedcc65c2b526927a1" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 0585a1681..6e8b84f6d 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.45", + "version": "1.7.46", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 0d8a2c8c44cf1655e4580bbe6bae0314662ffc49 Mon Sep 17 00:00:00 2001 From: 
Robert Habermeier Date: Wed, 5 Apr 2017 11:30:51 +0200 Subject: [PATCH 80/89] CHT key optimization --- ethcore/light/src/client/header_chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 1c218204b..d4ea8d107 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -119,7 +119,7 @@ impl Decodable for Entry { } fn cht_key(number: u64) -> String { - format!("canonical_{}", number) + format!("{:08x}_canonical", number) } fn era_key(number: u64) -> String { From e2dfea8c12978619db30d8f44183ff3909f215b7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Apr 2017 11:57:29 +0200 Subject: [PATCH 81/89] set gas limit before proving transactions --- ethcore/light/src/on_demand/request.rs | 4 +++- ethcore/src/client/client.rs | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index cda1d6feb..d3bb06888 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -243,12 +243,14 @@ impl TransactionProof { pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { let root = self.header.state_root(); + let mut env_info = self.env_info.clone(); + env_info.gas_limit = self.tx.gas.clone(); state::check_proof( state_items, root, &self.tx, &*self.engine, - &self.env_info, + &env_info, ) } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3accc777f..4bd29d100 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -395,7 +395,7 @@ impl Client { if header.number() < self.engine().params().validate_receipts_transition && header.receipts_root() != locked_block.block().header().receipts_root() { locked_block = locked_block.strip_receipts(); } - + // Final Verification if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); @@ -1627,10 +1627,12 @@ impl ::client::ProvingBlockChainClient for Client { } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { - let (state, env_info) = match (self.state_at(id), self.env_info(id)) { + let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) { (Some(s), Some(e)) => (s, e), _ => return None, }; + + env_info.gas_limit = transaction.gas.clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone(); let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); From 81db3461fe11ceee9e012d98bb20f3a514285ae3 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2017 12:27:37 +0200 Subject: [PATCH 82/89] Added an assert --- ethcore/src/snapshot/account.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 23ba31e17..b17c3eb48 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -103,7 +103,8 @@ pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, } if let Some(pair) = leftover.take() { - account_stream.append_raw_checked(&pair, 1, target_chunk_size); + let leftover_appended = account_stream.append_raw_checked(&pair, 1, target_chunk_size); + assert!(leftover_appended); } loop { From 455f994673f3fbf10188be45eda530f7bb0f6696 Mon Sep 17 00:00:00 
2001 From: Nicolas Gotchac Date: Wed, 5 Apr 2017 16:27:44 +0200 Subject: [PATCH 83/89] Don't use port 8080 anymore (#5397) --- js/src/environment/index.js | 2 +- js/src/secureApi.js | 2 +- js/webpack/shared.js | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/js/src/environment/index.js b/js/src/environment/index.js index d1522c829..1123ddd9b 100644 --- a/js/src/environment/index.js +++ b/js/src/environment/index.js @@ -23,7 +23,7 @@ const parityNode = ( process.env.PARITY_URL && `http://${process.env.PARITY_URL}` ) || ( process.env.NODE_ENV === 'production' - ? 'http://127.0.0.1:8080' + ? 'http://127.0.0.1:8545' : '' ); diff --git a/js/src/secureApi.js b/js/src/secureApi.js index b2e08730d..e19d7ae99 100644 --- a/js/src/secureApi.js +++ b/js/src/secureApi.js @@ -28,7 +28,7 @@ export default class SecureApi extends Api { _tokens = []; _dappsInterface = null; - _dappsPort = 8080; + _dappsPort = 8545; _signerPort = 8180; static getTransport (url, sysuiToken) { diff --git a/js/webpack/shared.js b/js/webpack/shared.js index 2f151944e..3e2eef8f1 100644 --- a/js/webpack/shared.js +++ b/js/webpack/shared.js @@ -171,13 +171,13 @@ function addProxies (app) { })); app.use('/api', proxy({ - target: 'http://127.0.0.1:8080', + target: 'http://127.0.0.1:8545', changeOrigin: true, autoRewrite: true })); app.use('/app', proxy({ - target: 'http://127.0.0.1:8080', + target: 'http://127.0.0.1:8545', changeOrigin: true, pathRewrite: { '^/app': '' @@ -193,7 +193,7 @@ function addProxies (app) { })); app.use('/rpc', proxy({ - target: 'http://127.0.0.1:8080', + target: 'http://127.0.0.1:8545', changeOrigin: true })); } From 03ec27be666452f3cbc751ff51c3e53c629e9d26 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2017 16:50:06 +0200 Subject: [PATCH 84/89] Removed assert --- ethcore/src/snapshot/account.rs | 5 +++-- ethcore/src/snapshot/error.rs | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index b17c3eb48..cef32bc93 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -103,8 +103,9 @@ pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, } if let Some(pair) = leftover.take() { - let leftover_appended = account_stream.append_raw_checked(&pair, 1, target_chunk_size); - assert!(leftover_appended); + if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) { + return Err(Error::ChunkTooSmall); + } } loop { diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index da8a9816b..c1391b300 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -55,6 +55,8 @@ pub enum Error { Io(::std::io::Error), /// Snapshot version is not supported. VersionNotSupported(u64), + /// Max chunk size is to small to fit basic account data. 
+ ChunkTooSmall, } impl fmt::Display for Error { @@ -76,6 +78,7 @@ impl fmt::Display for Error { Error::Decoder(ref err) => err.fmt(f), Error::Trie(ref err) => err.fmt(f), Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver), + Error::ChunkTooSmall => write!(f, "Chunk size is too small."), } } } From 5ebd38f1d6f3f7708294ef7b2d4f70b64dc54887 Mon Sep 17 00:00:00 2001 From: GitLab Build Bot Date: Wed, 5 Apr 2017 14:58:00 +0000 Subject: [PATCH 85/89] [ci skip] js-precompiled 20170405-145308 --- Cargo.lock | 2 +- js/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03823d6e9..77652d907 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1757,7 +1757,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#9bfc6f3dfca2c337c53084bedcc65c2b526927a1" +source = "git+https://github.com/paritytech/js-precompiled.git#606f5bf9966b93fcc695e43d648bb9b9fe178c3f" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/js/package.json b/js/package.json index 6e8b84f6d..3836edaa8 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.46", + "version": "1.7.47", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", From 91dbf3c09d38ce2086fd875cefff2083036f9fdd Mon Sep 17 00:00:00 2001 From: "Denis S. Soldatov aka General-Beck" Date: Wed, 5 Apr 2017 21:35:53 +0300 Subject: [PATCH 86/89] init switch to parity docker hub https://hub.docker.com/r/parity/rust/ --- .gitlab-ci.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 834b638ff..7d73368ff 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,13 +14,14 @@ cache: untracked: true linux-stable: stage: build - image: ethcore/rust:stable + image: parity/rust:gitlab-ci only: - beta - tags - stable - triggers script: + - rustup default stable - cargo build -j $(nproc) --release --features final $CARGOFLAGS - cargo build -j $(nproc) --release -p evmbin - cargo build -j $(nproc) --release -p ethstore @@ -105,13 +106,14 @@ linux-stable-debian: name: "stable-x86_64-unknown-debian-gnu_parity" linux-beta: stage: build - image: ethcore/rust:beta + image: parity/rust:gitlab-ci only: - beta - tags - stable - triggers script: + - rustup default beta - cargo build -j $(nproc) --release $CARGOFLAGS - strip target/release/parity tags: @@ -124,13 +126,14 @@ linux-beta: allow_failure: true linux-nightly: stage: build - image: ethcore/rust:nightly + image: parity/rust:gitlab-ci only: - beta - tags - stable - triggers script: + - rustup default nightly - cargo build -j $(nproc) --release $CARGOFLAGS - strip target/release/parity tags: From 89a4cb08e822568b2734fe9d2d49ffa0ef23ad4e Mon Sep 17 00:00:00 2001 From: "Denis S. 
Soldatov aka General-Beck" Date: Wed, 5 Apr 2017 21:40:12 +0300 Subject: [PATCH 87/89] Update .gitlab-ci.yml --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7d73368ff..5d90c5d4e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -547,11 +547,12 @@ test-windows: allow_failure: true test-rust-stable: stage: test - image: ethcore/rust:stable + image: parity/rust:gitlab-ci before_script: - git submodule update --init --recursive - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) script: + - rustup show - export RUST_BACKTRACE=1 - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi tags: From d74e044be4deefb9a6c01e4d5cb1170e35b1e9c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 6 Apr 2017 19:32:30 +0200 Subject: [PATCH 88/89] Fixing compilation without dapps. (#5410) --- parity/dapps.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/parity/dapps.rs b/parity/dapps.rs index 4cdd1e550..759eb4cde 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -82,7 +82,7 @@ impl ContractClient for FullRegistrar { // TODO: light client implementation forwarding to OnDemand and waiting for future // to resolve. pub struct Dependencies { - pub sync_status: Arc<::parity_dapps::SyncStatus>, + pub sync_status: Arc, pub contract_client: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, @@ -103,8 +103,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) ).map(Some) } -pub use self::server::Middleware; -pub use self::server::dapps_middleware; +pub use self::server::{SyncStatus, Middleware, dapps_middleware}; #[cfg(not(feature = "dapps"))] mod server { @@ -112,11 +111,12 @@ mod server { use std::path::PathBuf; use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction}; - pub struct Middleware; + pub type SyncStatus = Fn() -> bool; + pub struct Middleware; impl RequestMiddleware for Middleware { fn on_request( - &self, req: &hyper::server::Request, control: &hyper::Control + &self, _req: &hyper::server::Request, _control: &hyper::Control ) -> RequestMiddlewareAction { unreachable!() } @@ -142,6 +142,7 @@ mod server { use parity_reactor; pub type Middleware = parity_dapps::Middleware; + pub use parity_dapps::SyncStatus; pub fn dapps_middleware( deps: Dependencies, From f223ed21a55d1e793d96056ce7b56ded55f14251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 6 Apr 2017 19:38:33 +0200 Subject: [PATCH 89/89] APIs wildcards and simple arithmetic. (#5402) --- parity/cli/usage.txt | 3 +- parity/configuration.rs | 17 +++---- parity/rpc_apis.rs | 100 ++++++++++++++++++++++++++++++++++------ 3 files changed, 95 insertions(+), 25 deletions(-) diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 1e5f3c0fb..7ee2860b1 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -147,8 +147,9 @@ API and Console Options: (default: {flag_jsonrpc_cors:?}) --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited list of API - name. Possible name are web3, eth, net, personal, + name. Possible name are all, safe, web3, eth, net, personal, parity, parity_set, traces, rpc, parity_accounts. 
+ You can also disable a specific API by putting '-' in the front: all,-personal (default: {flag_jsonrpc_apis}). --jsonrpc-hosts HOSTS List of allowed Host header values. This option will validate the Host header sent by the browser, it diff --git a/parity/configuration.rs b/parity/configuration.rs index f585dc22e..7061610bb 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -30,6 +30,7 @@ use ethcore::miner::{MinerOptions, Banning, StratumOptions}; use ethcore::verification::queue::VerifierSettings; use rpc::{IpcConfiguration, HttpConfiguration}; +use rpc_apis::ApiSet; use ethcore_rpc::NetworkSettings; use cache::CacheConfig; use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db, @@ -718,16 +719,7 @@ impl Configuration { .collect(); if self.args.flag_geth { - apis.push("personal"); - } - - if self.args.flag_public_node { - apis.retain(|api| { - match *api { - "eth" | "net" | "parity" | "rpc" | "web3" => true, - _ => false - } - }); + apis.insert(0, "personal"); } apis.join(",") @@ -788,7 +780,10 @@ impl Configuration { enabled: self.rpc_enabled(), interface: self.rpc_interface(), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), - apis: self.rpc_apis().parse()?, + apis: match self.args.flag_public_node { + false => self.rpc_apis().parse()?, + true => self.rpc_apis().parse::()?.retain(ApiSet::PublicContext), + }, hosts: self.rpc_hosts(), cors: self.rpc_cors(), threads: match self.args.flag_jsonrpc_threads { diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index ea1eabc61..cf3c0b7c9 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -85,9 +85,17 @@ impl FromStr for Api { #[derive(Debug, Clone)] pub enum ApiSet { + // Safe context (like token-protected WS interface) SafeContext, + // Unsafe context (like jsonrpc over http) UnsafeContext, + // Public context (like public jsonrpc over http) + PublicContext, + // All possible APIs + All, + // Local "unsafe" context and accounts access IpcContext, + // Fixed list of APis List(HashSet), } @@ -107,10 +115,30 @@ impl FromStr for ApiSet { type Err = String; fn from_str(s: &str) -> Result { - s.split(',') - .map(Api::from_str) - .collect::>() - .map(ApiSet::List) + let mut apis = HashSet::new(); + + for api in s.split(',') { + match api { + "all" => { + apis.extend(ApiSet::All.list_apis()); + }, + "safe" => { + // Safe APIs are those that are safe even in UnsafeContext. + apis.extend(ApiSet::UnsafeContext.list_apis()); + }, + // Remove the API + api if api.starts_with("-") => { + let api = api[1..].parse()?; + apis.remove(&api); + }, + api => { + let api = api.parse()?; + apis.insert(api); + }, + } + } + + Ok(ApiSet::List(apis)) } } @@ -403,21 +431,41 @@ impl Dependencies for LightDependencies { } impl ApiSet { + /// Retains only APIs in given set. 
+ pub fn retain(self, set: Self) -> Self { + ApiSet::List(&self.list_apis() & &set.list_apis()) + } + pub fn list_apis(&self) -> HashSet { - let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] - .into_iter().collect(); + let mut public_list = vec![ + Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Rpc, + ].into_iter().collect(); match *self { ApiSet::List(ref apis) => apis.clone(), - ApiSet::UnsafeContext => safe_list, + ApiSet::PublicContext => public_list, + ApiSet::UnsafeContext => { + public_list.insert(Api::Traces); + public_list + }, ApiSet::IpcContext => { - safe_list.insert(Api::ParityAccounts); - safe_list + public_list.insert(Api::Traces); + public_list.insert(Api::ParityAccounts); + public_list }, ApiSet::SafeContext => { - safe_list.insert(Api::ParityAccounts); - safe_list.insert(Api::ParitySet); - safe_list.insert(Api::Signer); - safe_list + public_list.insert(Api::Traces); + public_list.insert(Api::ParityAccounts); + public_list.insert(Api::ParitySet); + public_list.insert(Api::Signer); + public_list + }, + ApiSet::All => { + public_list.insert(Api::Traces); + public_list.insert(Api::ParityAccounts); + public_list.insert(Api::ParitySet); + public_list.insert(Api::Signer); + public_list.insert(Api::Personal); + public_list }, } } @@ -493,4 +541,30 @@ mod test { ].into_iter().collect(); assert_eq!(ApiSet::SafeContext.list_apis(), expected); } + + #[test] + fn test_all_apis() { + assert_eq!("all".parse::().unwrap(), ApiSet::List(vec![ + Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, + Api::ParityAccounts, + Api::ParitySet, Api::Signer, + Api::Personal + ].into_iter().collect())); + } + + #[test] + fn test_all_without_personal_apis() { + assert_eq!("personal,all,-personal".parse::().unwrap(), ApiSet::List(vec![ + Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, + Api::ParityAccounts, + Api::ParitySet, Api::Signer, + ].into_iter().collect())); + } + + #[test] + fn test_safe_parsing() { + assert_eq!("safe".parse::().unwrap(), ApiSet::List(vec![ + Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc, + ].into_iter().collect())); + } }
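
A note on the `--jsonrpc-apis` wildcard syntax introduced in [PATCH 89/89]: the list is processed left to right, so `all` and `safe` expand to predefined sets and a leading `-` removes a single API from whatever has been accumulated so far. The sketch below is a self-contained illustration of that parsing shape only; `MiniApi` and `parse_apis` are stand-ins invented for this example, not the real types in `parity/rpc_apis.rs`.

```rust
use std::collections::HashSet;

// Illustrative stand-in for the real `Api` enum in parity/rpc_apis.rs.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum MiniApi { Web3, Eth, Net, Personal, Traces }

// Mirrors the comma-delimited parsing with `all`, `safe` and `-api` subtraction.
fn parse_apis(s: &str) -> Result<HashSet<MiniApi>, String> {
    let all = [MiniApi::Web3, MiniApi::Eth, MiniApi::Net, MiniApi::Personal, MiniApi::Traces];
    let safe = [MiniApi::Web3, MiniApi::Eth, MiniApi::Net, MiniApi::Traces];

    let one = |name: &str| match name {
        "web3" => Ok(MiniApi::Web3),
        "eth" => Ok(MiniApi::Eth),
        "net" => Ok(MiniApi::Net),
        "personal" => Ok(MiniApi::Personal),
        "traces" => Ok(MiniApi::Traces),
        other => Err(format!("Unknown API: {}", other)),
    };

    let mut apis = HashSet::new();
    for api in s.split(',') {
        match api {
            "all" => apis.extend(all.iter().cloned()),
            "safe" => apis.extend(safe.iter().cloned()),
            // A leading '-' removes the API from the set built so far.
            api if api.starts_with('-') => { apis.remove(&one(&api[1..])?); },
            api => { apis.insert(one(api)?); },
        }
    }
    Ok(apis)
}

fn main() {
    // `all,-personal` yields everything except `personal`, matching the
    // behaviour checked by `test_all_without_personal_apis` in the patch.
    let set = parse_apis("all,-personal").unwrap();
    assert!(!set.contains(&MiniApi::Personal));
    assert!(set.contains(&MiniApi::Eth));
    println!("{:?}", set);
}
```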
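
For the `begin_unbounded_list` / `append_raw_checked` additions to `util/rlp` earlier in this series, a minimal usage sketch follows. It assumes the crate is importable as `rlp` and that the header back-fill behaves as the new tests exercise it (`RlpStream::new`, `append` and `drain` are all used by those tests); the expected byte count is standard RLP, not something asserted by the patch itself.

```rust
extern crate rlp;

use rlp::RlpStream;

fn main() {
    let mut stream = RlpStream::new();
    // Open a list whose item count is not known up front; one placeholder
    // byte is reserved for the eventual list header.
    stream.begin_unbounded_list();
    stream.append(&1u32);
    stream.append(&2u32);
    // Back-fill the header now that the payload length (2 bytes) is known.
    stream.complete_unbounded_list();
    // Standard RLP for the list [1, 2] is 0xc2 0x01 0x02, i.e. 3 bytes total.
    assert_eq!(stream.drain().len(), 3);
}
```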
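
On the CHT key change in [PATCH 80/89]: the commit message only calls it an optimization, but one observable property of the fixed-width `{:08x}_canonical` form is that lexicographic key order matches numeric block-number order (for numbers below 2^32), which the old `canonical_{}` form did not guarantee. The sketch below demonstrates that ordering property only; it copies the key format from the patch and makes no claim about the actual motivation.

```rust
fn cht_key(number: u64) -> String {
    // Same format as the patched ethcore/light/src/client/header_chain.rs.
    format!("{:08x}_canonical", number)
}

fn main() {
    // Fixed-width hex: lexicographic key order follows block-number order.
    assert!(cht_key(9) < cht_key(10));
    // The old "canonical_{}" scheme sorted "canonical_10" before "canonical_9".
    assert!("canonical_10" < "canonical_9");
    println!("{} / {}", cht_key(9), cht_key(10));
}
```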