From 1b6dead109865300020010f1ab0c5f4ea1e203a8 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Sun, 17 Jan 2016 15:56:09 +0100 Subject: [PATCH 001/138] Fixing clippy stuff - work in progress --- src/account.rs | 4 ++-- src/account_diff.rs | 23 +++++++++++------------ src/blockchain.rs | 10 ++++------ src/builtin.rs | 15 ++++++--------- src/evm/interpreter.rs | 10 +++++----- src/executive.rs | 32 ++++++++++++++------------------ src/externalities.rs | 1 + src/header.rs | 9 +++++---- src/lib.rs | 2 +- src/receipt.rs | 2 +- src/service.rs | 15 ++++----------- src/spec.rs | 25 +++++++++++++------------ src/state.rs | 19 ++++++++++--------- src/state_diff.rs | 2 +- src/sync/chain.rs | 39 +++++++++++++++------------------------ src/transaction.rs | 9 ++++----- src/views.rs | 2 +- 17 files changed, 98 insertions(+), 121 deletions(-) diff --git a/src/account.rs b/src/account.rs index 8c36c7cbd..5424356b7 100644 --- a/src/account.rs +++ b/src/account.rs @@ -103,7 +103,7 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{ - (Filth::Clean, H256::from(SecTrieDB::new(db, &self.storage_root).get(key.bytes()).map(|v| -> U256 {decode(v)}).unwrap_or(U256::zero()))) + (Filth::Clean, H256::from(SecTrieDB::new(db, &self.storage_root).get(key.bytes()).map_or(U256::zero(), |v| -> U256 {decode(v)}))) }).1.clone() } @@ -149,7 +149,7 @@ impl Account { /// Provide a database to lookup `code_hash`. Should not be called if it is a contract without code. pub fn cache_code(&mut self, db: &HashDB) -> bool { // TODO: fill out self.code_cache; - return self.is_cached() || + self.is_cached() || match self.code_hash { Some(ref h) => match db.lookup(h) { Some(x) => { self.code_cache = x.to_vec(); true }, diff --git a/src/account_diff.rs b/src/account_diff.rs index 2bf138669..05c4d00a6 100644 --- a/src/account_diff.rs +++ b/src/account_diff.rs @@ -12,10 +12,10 @@ pub enum Existance { impl fmt::Display for Existance { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - &Existance::Born => try!(write!(f, "+++")), - &Existance::Alive => try!(write!(f, "***")), - &Existance::Died => try!(write!(f, "XXX")), + match *self { + Existance::Born => try!(write!(f, "+++")), + Existance::Alive => try!(write!(f, "***")), + Existance::Died => try!(write!(f, "XXX")), } Ok(()) } @@ -102,16 +102,15 @@ impl fmt::Display for AccountDiff { Diff::Changed(ref pre, ref post) => try!(write!(f, "${} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - *min(pre, post))), _ => {}, } - match self.code { - Diff::Born(ref x) => try!(write!(f, " code {}", x.pretty())), - _ => {}, + if let Diff::Born(ref x) = self.code { + try!(write!(f, " code {}", x.pretty())); } try!(write!(f, "\n")); - for (k, dv) in self.storage.iter() { - match dv { - &Diff::Born(ref v) => try!(write!(f, " + {} => {}\n", interpreted_hash(k), interpreted_hash(v))), - &Diff::Changed(ref pre, ref post) => try!(write!(f, " * {} => {} (was {})\n", interpreted_hash(k), interpreted_hash(post), interpreted_hash(pre))), - &Diff::Died(_) => try!(write!(f, " X {}\n", interpreted_hash(k))), + for (k, dv) in &self.storage { + match *dv { + Diff::Born(ref v) => try!(write!(f, " + {} => {}\n", interpreted_hash(k), interpreted_hash(v))), + Diff::Changed(ref pre, ref post) => try!(write!(f, " * {} => {} (was {})\n", interpreted_hash(k), interpreted_hash(post), interpreted_hash(pre))), + Diff::Died(_) => 
try!(write!(f, " X {}\n", interpreted_hash(k))), _ => {}, } } diff --git a/src/blockchain.rs b/src/blockchain.rs index f08d15057..3c44e8354 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -127,9 +127,8 @@ impl BlockProvider for BlockChain { fn block(&self, hash: &H256) -> Option { { let read = self.blocks.read().unwrap(); - match read.get(hash) { - Some(v) => return Some(v.clone()), - None => () + if let Some(v) = read.get(hash) { + return Some(v.clone()); } } @@ -509,9 +508,8 @@ impl BlockChain { T: ExtrasIndexable { { let read = cache.read().unwrap(); - match read.get(hash) { - Some(_) => return true, - None => () + if let Some(_) = read.get(hash) { + return true; } } diff --git a/src/builtin.rs b/src/builtin.rs index 85319c948..9916cadb4 100644 --- a/src/builtin.rs +++ b/src/builtin.rs @@ -93,16 +93,13 @@ pub fn new_builtin_exec(name: &str) -> Option> { if it.v == H256::from(&U256::from(27)) || it.v == H256::from(&U256::from(28)) { let s = Signature::from_rsv(&it.r, &it.s, it.v[31] - 27); if ec::is_valid(&s) { - match ec::recover(&s, &it.hash) { - Ok(p) => { - let r = p.as_slice().sha3(); - // NICE: optimise and separate out into populate-like function - for i in 0..min(32, output.len()) { - output[i] = if i < 12 {0} else {r[i]}; - } + if let Ok(p) = ec::recover(&s, &it.hash) { + let r = p.as_slice().sha3(); + // NICE: optimise and separate out into populate-like function + for i in 0..min(32, output.len()) { + output[i] = if i < 12 {0} else {r[i]}; } - _ => {} - }; + } } } })), diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index e6499b022..bfdd182a9 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -65,7 +65,7 @@ impl VecStack { impl Stack for VecStack { fn peek(&self, no_from_top: usize) -> &S { - return &self.stack[self.stack.len() - no_from_top - 1]; + &self.stack[self.stack.len() - no_from_top - 1] } fn swap_with_top(&mut self, no_from_top: usize) { @@ -150,7 +150,7 @@ impl Memory for Vec { } fn size(&self) -> usize { - return self.len() + self.len() } fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] { @@ -820,7 +820,7 @@ impl Interpreter { fn copy_data_to_memory(&self, mem: &mut Memory, stack: &mut Stack, - data: &Bytes) { + data: &[u8]) { let offset = stack.pop_back(); let index = stack.pop_back(); let size = stack.pop_back(); @@ -1097,7 +1097,7 @@ impl Interpreter { Ok(()) } - fn find_jump_destinations(&self, code: &Bytes) -> HashSet { + fn find_jump_destinations(&self, code: &[u8]) -> HashSet { let mut jump_dests = HashSet::new(); let mut position = 0; @@ -1112,7 +1112,7 @@ impl Interpreter { position += 1; } - return jump_dests; + jump_dests } } diff --git a/src/executive.rs b/src/executive.rs index 2a26dd3f0..8950c5b09 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -123,8 +123,8 @@ impl<'a> Executive<'a> { let mut substate = Substate::new(); - let res = match t.action() { - &Action::Create => { + let res = match *t.action() { + Action::Create => { let new_address = contract_address(&sender, &nonce); let params = ActionParams { code_address: new_address.clone(), @@ -139,7 +139,7 @@ impl<'a> Executive<'a> { }; self.create(params, &mut substate) }, - &Action::Call(ref address) => { + Action::Call(ref address) => { let params = ActionParams { code_address: address.clone(), address: address.clone(), @@ -177,7 +177,7 @@ impl<'a> Executive<'a> { // if destination is builtin, try to execute it let default = []; - let data = if let &Some(ref d) = ¶ms.data { d as &[u8] } else { &default as &[u8] }; + let data = if 
let Some(ref d) = params.data { d as &[u8] } else { &default as &[u8] }; let cost = self.engine.cost_of_builtin(¶ms.code_address, data); match cost <= params.gas { @@ -248,7 +248,7 @@ impl<'a> Executive<'a> { let refunds_bound = sstore_refunds + suicide_refunds; // real ammount to refund - let gas_left_prerefund = match &result { &Ok(x) => x, _ => x!(0) }; + let gas_left_prerefund = match result { Ok(x) => x, _ => x!(0) }; let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) / U256::from(2)); let gas_left = gas_left_prerefund + refunded; @@ -265,7 +265,7 @@ impl<'a> Executive<'a> { self.state.add_balance(&self.info.author, &fees_value); // perform suicides - for address in substate.suicides.iter() { + for address in &substate.suicides { trace!("Killing {}", address); self.state.kill_account(address); } @@ -273,11 +273,7 @@ impl<'a> Executive<'a> { match result { Err(evm::Error::Internal) => Err(ExecutionError::Internal), // TODO [ToDr] BadJumpDestination @debris - how to handle that? - Err(evm::Error::OutOfGas) - | Err(evm::Error::BadJumpDestination { destination: _ }) - | Err(evm::Error::BadInstruction { instruction: _ }) - | Err(evm::Error::StackUnderflow {instruction: _, wanted: _, on_stack: _}) - | Err(evm::Error::OutOfStack {instruction: _, wanted: _, limit: _}) => { + Err(_) => { Ok(Executed { gas: t.gas, gas_used: t.gas, @@ -302,15 +298,15 @@ impl<'a> Executive<'a> { fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate, backup: State) { // TODO: handle other evm::Errors same as OutOfGas once they are implemented - match result { - &Err(evm::Error::OutOfGas) - | &Err(evm::Error::BadJumpDestination { destination: _ }) - | &Err(evm::Error::BadInstruction { instruction: _ }) - | &Err(evm::Error::StackUnderflow {instruction: _, wanted: _, on_stack: _}) - | &Err(evm::Error::OutOfStack {instruction: _, wanted: _, limit: _}) => { + match *result { + Err(evm::Error::OutOfGas) + | Err(evm::Error::BadJumpDestination {..}) + | Err(evm::Error::BadInstruction {.. }) + | Err(evm::Error::StackUnderflow {..}) + | Err(evm::Error::OutOfStack {..}) => { self.state.revert(backup); }, - &Ok(_) | &Err(evm::Error::Internal) => substate.accrue(un_substate) + Ok(_) | Err(evm::Error::Internal) => substate.accrue(un_substate) } } } diff --git a/src/externalities.rs b/src/externalities.rs index 8b16cc72b..9c07f005c 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -161,6 +161,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or(vec![]) } + #[allow(match_ref_pats)] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { diff --git a/src/header.rs b/src/header.rs index 0986a0d80..b09e0afcf 100644 --- a/src/header.rs +++ b/src/header.rs @@ -135,9 +135,10 @@ impl Header { s.append(&self.gas_used); s.append(&self.timestamp); s.append(&self.extra_data); - match with_seal { - Seal::With => for b in self.seal.iter() { s.append_raw(&b, 1); }, - _ => {} + if let Seal::With = with_seal { + for b in &self.seal { + s.append_raw(&b, 1); + } } } @@ -198,7 +199,7 @@ impl Encodable for Header { self.timestamp.encode(e); self.extra_data.encode(e); - for b in self.seal.iter() { + for b in &self.seal { e.emit_raw(&b); } }) diff --git a/src/lib.rs b/src/lib.rs index 7e4fdf33e..3bbfe3c10 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,7 +73,6 @@ //! sudo make install //! sudo ldconfig //! 
``` - #[macro_use] extern crate log; extern crate rustc_serialize; @@ -86,6 +85,7 @@ extern crate env_logger; #[cfg(feature = "jit" )] extern crate evmjit; #[macro_use] +#[allow(match_bool)] extern crate ethcore_util as util; pub mod common; diff --git a/src/receipt.rs b/src/receipt.rs index 21a66f5cf..a1b2fbe6f 100644 --- a/src/receipt.rs +++ b/src/receipt.rs @@ -31,7 +31,7 @@ impl RlpStandard for Receipt { // TODO: make work: //s.append(&self.logs); s.append_list(self.logs.len()); - for l in self.logs.iter() { + for l in &self.logs { l.rlp_append(s); } } diff --git a/src/service.rs b/src/service.rs index 3bc137c9c..06f490c30 100644 --- a/src/service.rs +++ b/src/service.rs @@ -51,20 +51,13 @@ impl IoHandler for ClientIoHandler { fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { } + #[allow(match_ref_pats)] fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) { - match net_message { - &mut UserMessage(ref mut message) => { - match message { - &mut SyncMessage::BlockVerified(ref mut bytes) => { - self.client.write().unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); - }, - _ => {}, // ignore other messages - } - + if let &mut UserMessage(ref mut message) = net_message { + if let &mut SyncMessage::BlockVerified(ref mut bytes) = message { + self.client.write().unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); } - _ => {}, // ignore other messages } - } } diff --git a/src/spec.rs b/src/spec.rs index b174b0e9f..210b46dca 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -10,7 +10,7 @@ pub fn gzip64res_to_json(source: &[u8]) -> Json { let data = source.from_base64().expect("Genesis block is malformed!"); let data_ref: &[u8] = &data; let mut decoder = GzDecoder::new(data_ref).expect("Gzip is invalid"); - let mut s: String = "".to_string(); + let mut s: String = "".to_owned(); decoder.read_to_string(&mut s).expect("Gzip is invalid"); Json::from_str(&s).expect("Json is invalid") } @@ -18,14 +18,14 @@ pub fn gzip64res_to_json(source: &[u8]) -> Json { /// Convert JSON value to equivlaent RLP representation. // TODO: handle container types. fn json_to_rlp(json: &Json) -> Bytes { - match json { - &Json::Boolean(o) => encode(&(if o {1u64} else {0})), - &Json::I64(o) => encode(&(o as u64)), - &Json::U64(o) => encode(&o), - &Json::String(ref s) if s.len() >= 2 && &s[0..2] == "0x" && U256::from_str(&s[2..]).is_ok() => { + match *json { + Json::Boolean(o) => encode(&(if o {1u64} else {0})), + Json::I64(o) => encode(&(o as u64)), + Json::U64(o) => encode(&o), + Json::String(ref s) if s.len() >= 2 && &s[0..2] == "0x" && U256::from_str(&s[2..]).is_ok() => { encode(&U256::from_str(&s[2..]).unwrap()) }, - &Json::String(ref s) => { + Json::String(ref s) => { encode(s) }, _ => panic!() @@ -96,6 +96,7 @@ pub struct Spec { impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
+ #[allow(wrong_self_convention)] pub fn to_engine(self) -> Result, Error> { match self.engine_name.as_ref() { "NullEngine" => Ok(NullEngine::new_boxed(self)), @@ -169,8 +170,8 @@ impl FromJson for Spec { builtins.insert(addr.clone(), builtin); } } - let balance = acc.find("balance").and_then(|x| match x { &Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); - let nonce = acc.find("nonce").and_then(|x| match x { &Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); + let balance = acc.find("balance").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); + let nonce = acc.find("nonce").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); // let balance = if let Some(&Json::String(ref b)) = acc.find("balance") {U256::from_dec_str(b).unwrap_or(U256::from(0))} else {U256::from(0)}; // let nonce = if let Some(&Json::String(ref n)) = acc.find("nonce") {U256::from_dec_str(n).unwrap_or(U256::from(0))} else {U256::from(0)}; // TODO: handle code & data if they exist. @@ -199,8 +200,8 @@ impl FromJson for Spec { Spec { - name: json.find("name").map(|j| j.as_string().unwrap()).unwrap_or("unknown").to_string(), - engine_name: json["engineName"].as_string().unwrap().to_string(), + name: json.find("name").map(|j| j.as_string().unwrap()).unwrap_or("unknown").to_owned(), + engine_name: json["engineName"].as_string().unwrap().to_owned(), engine_params: json_to_rlp_map(&json["params"]), builtins: builtins, parent_hash: H256::from_str(&genesis["parentHash"].as_string().unwrap()[2..]).unwrap(), @@ -226,7 +227,7 @@ impl Spec { let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); - for (address, account) in self.genesis_state.iter() { + for (address, account) in &self.genesis_state { t.insert(address.as_slice(), &account.rlp()); } } diff --git a/src/state.rs b/src/state.rs index fc4d6f817..445d5a85c 100644 --- a/src/state.rs +++ b/src/state.rs @@ -87,22 +87,22 @@ impl State { /// Get the balance of account `a`. pub fn balance(&self, a: &Address) -> U256 { - self.get(a, false).as_ref().map(|account| account.balance().clone()).unwrap_or(U256::from(0u8)) + self.get(a, false).as_ref().map_or(U256::zero(), |account| account.balance().clone()) } /// Get the nonce of account `a`. pub fn nonce(&self, a: &Address) -> U256 { - self.get(a, false).as_ref().map(|account| account.nonce().clone()).unwrap_or(U256::from(0u8)) + self.get(a, false).as_ref().map_or(U256::zero(), |account| account.nonce().clone()) } /// Mutate storage of account `a` so that it is `value` for `key`. pub fn storage_at(&self, a: &Address, key: &H256) -> H256 { - self.get(a, false).as_ref().map(|a|a.storage_at(&self.db, key)).unwrap_or(H256::new()) + self.get(a, false).as_ref().map_or(H256::new(), |a|a.storage_at(&self.db, key)) } /// Mutate storage of account `a` so that it is `value` for `key`. pub fn code(&self, a: &Address) -> Option { - self.get(a, true).as_ref().map(|a|a.code().map(|x|x.to_vec())).unwrap_or(None) + self.get(a, true).as_ref().map_or(None, |a|a.code().map(|x|x.to_vec())) } /// Add `incr` to the balance of account `a`. @@ -168,6 +168,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. + #[allow(match_ref_pats)] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. 
// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? @@ -184,9 +185,9 @@ impl State { { let mut trie = SecTrieDBMut::from_existing(db, root); for (address, ref a) in accounts.iter() { - match a { - &&Some(ref account) => trie.insert(address, &account.rlp()), - &&None => trie.remove(address), + match **a { + Some(ref account) => trie.insert(address, &account.rlp()), + None => trie.remove(address), } } } @@ -208,7 +209,7 @@ impl State { pub fn to_hashmap_pod(&self) -> HashMap { // TODO: handle database rather than just the cache. self.cache.borrow().iter().fold(HashMap::new(), |mut m, (add, opt)| { - if let &Some(ref acc) = opt { + if let Some(ref acc) = *opt { m.insert(add.clone(), PodAccount::from_account(acc)); } m @@ -219,7 +220,7 @@ impl State { pub fn to_pod(&self) -> PodState { // TODO: handle database rather than just the cache. PodState::new(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { - if let &Some(ref acc) = opt { + if let Some(ref acc) = *opt { m.insert(add.clone(), PodAccount::from_account(acc)); } m diff --git a/src/state_diff.rs b/src/state_diff.rs index d603dda5e..1cc04b2d0 100644 --- a/src/state_diff.rs +++ b/src/state_diff.rs @@ -14,7 +14,7 @@ impl StateDiff { impl fmt::Display for StateDiff { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in self.0.iter() { + for (add, acc) in &self.0 { try!(write!(f, "{} {}: {}", acc.existance(), add, acc)); } Ok(()) diff --git a/src/sync/chain.rs b/src/sync/chain.rs index 43f5968f4..181a8a9aa 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -212,7 +212,7 @@ impl ChainSync { self.downloading_bodies.clear(); self.headers.clear(); self.bodies.clear(); - for (_, ref mut p) in self.peers.iter_mut() { + for (_, ref mut p) in &mut self.peers { p.asking_blocks.clear(); } self.header_ids.clear(); @@ -375,7 +375,7 @@ impl ChainSync { transactions_root: tx_root, uncles: uncles }; - match self.header_ids.get(&header_id).map(|n| *n) { + match self.header_ids.get(&header_id).cloned() { Some(n) => { self.header_ids.remove(&header_id); self.bodies.insert_item(n, body.as_raw().to_vec()); @@ -699,16 +699,13 @@ impl ChainSync { /// Used to recover from an error and re-download parts of the chain detected as bad. 
fn remove_downloaded_blocks(&mut self, start: BlockNumber) { for n in self.headers.get_tail(&start) { - match self.headers.find_item(&n) { - Some(ref header_data) => { - let header_to_delete = HeaderView::new(&header_data.data); - let header_id = HeaderId { - transactions_root: header_to_delete.transactions_root(), - uncles: header_to_delete.uncles_hash() - }; - self.header_ids.remove(&header_id); - }, - None => {} + if let Some(ref header_data) = self.headers.find_item(&n) { + let header_to_delete = HeaderView::new(&header_data.data); + let header_id = HeaderId { + transactions_root: header_to_delete.transactions_root(), + uncles: header_to_delete.uncles_hash() + }; + self.header_ids.remove(&header_id); } self.downloading_bodies.remove(&n); self.downloading_headers.remove(&n); @@ -796,12 +793,9 @@ impl ChainSync { packet.append(&chain.best_block_hash); packet.append(&chain.genesis_hash); //TODO: handle timeout for status request - match io.send(peer_id, STATUS_PACKET, packet.out()) { - Err(e) => { - warn!(target:"sync", "Error sending status request: {:?}", e); - io.disable_peer(peer_id); - } - Ok(_) => () + if let Err(e) = io.send(peer_id, STATUS_PACKET, packet.out()) { + warn!(target:"sync", "Error sending status request: {:?}", e); + io.disable_peer(peer_id); } } @@ -837,12 +831,9 @@ impl ChainSync { let mut data = Bytes::new(); let inc = (skip + 1) as BlockNumber; while number <= last && number > 0 && count < max_count { - match io.chain().block_header_at(number) { - Some(mut hdr) => { - data.append(&mut hdr); - count += 1; - } - None => {} + if let Some(mut hdr) = io.chain().block_header_at(number) { + data.append(&mut hdr); + count += 1; } if reverse { if number <= inc { diff --git a/src/transaction.rs b/src/transaction.rs index 4f547a243..51bde4b86 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -104,9 +104,8 @@ impl Transaction { }; s.append(&self.value); s.append(&self.data); - match with_seal { - Seal::With => { s.append(&(self.v as u16)).append(&self.r).append(&self.s); }, - _ => {} + if let Seal::With = with_seal { + s.append(&(self.v as u16)).append(&self.r).append(&self.s); } } @@ -125,7 +124,7 @@ impl FromJson for Transaction { gas_price: xjson!(&json["gasPrice"]), gas: xjson!(&json["gasLimit"]), action: match Bytes::from_json(&json["to"]) { - ref x if x.len() == 0 => Action::Create, + ref x if x.is_empty() => Action::Create, ref x => Action::Call(Address::from_slice(x)), }, value: xjson!(&json["value"]), @@ -290,4 +289,4 @@ fn signing() { let key = KeyPair::create().unwrap(); let t = Transaction::new_create(U256::from(42u64), b"Hello!".to_vec(), U256::from(3000u64), U256::from(50_000u64), U256::from(1u64)).signed(&key.secret()); assert_eq!(Address::from(key.public().sha3()), t.sender().unwrap()); -} \ No newline at end of file +} diff --git a/src/views.rs b/src/views.rs index 6c616774d..6bd1bef51 100644 --- a/src/views.rs +++ b/src/views.rs @@ -98,7 +98,7 @@ impl<'a> BlockView<'a> { /// Return List of transactions in given block. pub fn transaction_views(&self) -> Vec { - self.rlp.at(1).iter().map(|rlp| TransactionView::new_from_rlp(rlp)).collect() + self.rlp.at(1).iter().map(TransactionView::new_from_rlp).collect() } /// Return List of transactions in given block. 
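The hunks in this first patch apply a handful of recurring clippy rewrites: dereferencing once and matching plain variants instead of `&`-patterns, folding `map(...).unwrap_or(...)` into `map_or`, collapsing single-arm `match` statements into `if let`, iterating `&collection` rather than `collection.iter()`, and preferring `to_owned()` over `to_string()` for `&str`. The sketch below is illustrative only — the functions `label`, `balance`, and `cached` are invented for this example rather than taken from the codebase — and shows each rewrite in its "after" form on standalone types.

    // Illustrative sketch only -- not part of the patch series.
    use std::collections::HashMap;

    enum Existance { Born, Alive, Died } // name as in account_diff.rs

    fn label(e: &Existance) -> &'static str {
        // Before: `match e { &Existance::Born => ..., ... }`
        // After: dereference once and match plain variants (match_ref_pats).
        match *e {
            Existance::Born => "+++",
            Existance::Alive => "***",
            Existance::Died => "XXX",
        }
    }

    fn balance(accounts: &HashMap<String, u64>, who: &str) -> u64 {
        // Before: `accounts.get(who).map(|b| *b).unwrap_or(0)`
        // After: `map_or` folds the default and the mapping into one call.
        accounts.get(who).map_or(0, |b| *b)
    }

    fn cached(cache: &HashMap<u64, Vec<u8>>, number: u64) -> Option<Vec<u8>> {
        // Before: a `match` whose only interesting arm is `Some(v) => return ...`.
        // After: single-arm matches become `if let` (single_match).
        if let Some(v) = cache.get(&number) {
            return Some(v.clone());
        }
        None
    }

    fn main() {
        let mut accounts = HashMap::new();
        // `to_owned()` instead of `to_string()` for &str -> String, as in spec.rs.
        accounts.insert("alice".to_owned(), 7u64);

        // `for x in &collection` instead of `for x in collection.iter()`.
        for (name, bal) in &accounts {
            println!("{}: {}", name, bal);
        }

        println!("{}", label(&Existance::Alive));
        println!("{}", balance(&accounts, "alice"));
        println!("{:?}", cached(&HashMap::new(), 1));
    }

The same shapes recur throughout the later patches in this series, for example `map_or` in ethereum/ethash.rs and the `if let` rewrites in sync/chain.rs.
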
From f169951d4ec06237cd9ab53227fda41ba2e85dc3 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 11:10:38 +0100 Subject: [PATCH 002/138] Fixing all obvious warnings --- src/account_diff.rs | 2 +- src/block.rs | 8 +++++--- src/blockchain.rs | 5 ++--- src/ethereum/ethash.rs | 10 +++++----- src/evm/factory.rs | 7 ++++--- src/evm/interpreter.rs | 2 ++ src/lib.rs | 3 ++- src/spec.rs | 2 +- src/sync/chain.rs | 28 ++++++++++------------------ src/sync/io.rs | 4 ++-- src/sync/range_collection.rs | 4 ++-- src/views.rs | 2 +- 12 files changed, 37 insertions(+), 40 deletions(-) diff --git a/src/account_diff.rs b/src/account_diff.rs index 05c4d00a6..fece8c5d7 100644 --- a/src/account_diff.rs +++ b/src/account_diff.rs @@ -66,7 +66,7 @@ impl AccountDiff { post.storage.get(&k).cloned().unwrap_or(H256::new()) ))).collect(), }; - if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.len() == 0 { + if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() { None } else { Some(r) diff --git a/src/block.rs b/src/block.rs index e5ca18e8c..eacf30b44 100644 --- a/src/block.rs +++ b/src/block.rs @@ -1,3 +1,5 @@ +#![allow(ptr_arg)] // Because of &LastHashes -> &Vec<_> + use common::*; use engine::*; use state::*; @@ -169,7 +171,7 @@ impl<'x, 'y> OpenBlock<'x, 'y> { timestamp: self.block.header.timestamp, difficulty: self.block.header.difficulty.clone(), last_hashes: self.last_hashes.clone(), // TODO: should be a reference. - gas_used: self.block.archive.last().map(|t| t.receipt.gas_used).unwrap_or(U256::from(0)), + gas_used: self.block.archive.last().map_or(U256::zero(), |t| t.receipt.gas_used), gas_limit: self.block.header.gas_limit.clone(), } } @@ -200,7 +202,7 @@ impl<'x, 'y> OpenBlock<'x, 'y> { s.block.header.state_root = s.block.state.root().clone(); s.block.header.receipts_root = ordered_trie_root(s.block.archive.iter().map(|ref e| e.receipt.rlp_bytes()).collect()); s.block.header.log_bloom = s.block.archive.iter().fold(LogBloom::zero(), |mut b, e| {b |= &e.receipt.log_bloom; b}); - s.block.header.gas_used = s.block.archive.last().map(|t| t.receipt.gas_used).unwrap_or(U256::from(0)); + s.block.header.gas_used = s.block.archive.last().map_or(U256::zero(), |t| t.receipt.gas_used); s.block.header.note_dirty(); ClosedBlock::new(s, uncle_bytes) @@ -251,7 +253,7 @@ impl SealedBlock { let mut block_rlp = RlpStream::new_list(3); self.block.header.stream_rlp(&mut block_rlp, Seal::With); block_rlp.append_list(self.block.archive.len()); - for e in self.block.archive.iter() { e.transaction.rlp_append(&mut block_rlp); } + for e in &self.block.archive { e.transaction.rlp_append(&mut block_rlp); } block_rlp.append_raw(&self.uncle_bytes, 1); block_rlp.out() } diff --git a/src/blockchain.rs b/src/blockchain.rs index 3c44e8354..2f1bebf48 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -490,9 +490,8 @@ impl BlockChain { K: ExtrasSliceConvertable + Eq + Hash + Clone { { let read = cache.read().unwrap(); - match read.get(hash) { - Some(v) => return Some(v.clone()), - None => () + if let Some(v) = read.get(hash) { + return Some(v.clone()); } } diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index 8a86cf4e5..6ab49c1ac 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -31,13 +31,13 @@ impl Ethash { } fn u64_param(&self, name: &str) -> u64 { - *self.u64_params.write().unwrap().entry(name.to_string()).or_insert_with(|| - self.spec().engine_params.get(name).map(|a| decode(&a)).unwrap_or(0u64)) + 
*self.u64_params.write().unwrap().entry(name.to_owned()).or_insert_with(|| + self.spec().engine_params.get(name).map_or(0u64, |a| decode(&a))) } fn u256_param(&self, name: &str) -> U256 { - *self.u256_params.write().unwrap().entry(name.to_string()).or_insert_with(|| - self.spec().engine_params.get(name).map(|a| decode(&a)).unwrap_or(x!(0))) + *self.u256_params.write().unwrap().entry(name.to_owned()).or_insert_with(|| + self.spec().engine_params.get(name).map_or(x!(0), |a| decode(&a))) } } @@ -83,7 +83,7 @@ impl Engine for Ethash { /// Apply the block reward on finalisation of the block. /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). fn on_close_block(&self, block: &mut Block) { - let reward = self.spec().engine_params.get("blockReward").map(|a| decode(&a)).unwrap_or(U256::from(0u64)); + let reward = self.spec().engine_params.get("blockReward").map_or(U256::from(0u64), |a| decode(&a)); let fields = block.fields(); // Bestow block reward diff --git a/src/evm/factory.rs b/src/evm/factory.rs index 3dde4bb6d..5b4d545ac 100644 --- a/src/evm/factory.rs +++ b/src/evm/factory.rs @@ -65,10 +65,11 @@ impl Factory { fn jit() -> Box { unimplemented!() } - +} +impl Default for Factory { /// Returns jitvm factory #[cfg(feature = "jit")] - pub fn default() -> Factory { + fn default() -> Factory { Factory { evm: VMType::Jit } @@ -76,7 +77,7 @@ impl Factory { /// Returns native rust evm factory #[cfg(not(feature = "jit"))] - pub fn default() -> Factory { + fn default() -> Factory { Factory { evm: VMType::Interpreter } diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 0fd546b34..dc382e90e 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -229,6 +229,7 @@ struct CodeReader<'a> { code: &'a Bytes } +#[allow(len_without_is_empty)] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -331,6 +332,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { + #[allow(cyclomatic_complexity)] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/src/lib.rs b/src/lib.rs index 8ae81fbf2..14626a62f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,8 @@ #![feature(wrapping)] //#![feature(plugin)] //#![plugin(interpolate_idents)] +#![allow(match_bool, needless_range_loop, match_ref_pats)] + //! Ethcore's ethereum implementation //! //! 
### Rust version @@ -86,7 +88,6 @@ extern crate num_cpus; #[cfg(feature = "jit" )] extern crate evmjit; #[macro_use] -#[allow(match_bool)] extern crate ethcore_util as util; pub mod common; diff --git a/src/spec.rs b/src/spec.rs index 210b46dca..363fd9a49 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -200,7 +200,7 @@ impl FromJson for Spec { Spec { - name: json.find("name").map(|j| j.as_string().unwrap()).unwrap_or("unknown").to_owned(), + name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(), engine_name: json["engineName"].as_string().unwrap().to_owned(), engine_params: json_to_rlp_map(&json["params"]), builtins: builtins, diff --git a/src/sync/chain.rs b/src/sync/chain.rs index 795ed4f0e..e95e51ba2 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -268,6 +268,7 @@ impl ChainSync { Ok(()) } + #[allow(cyclomatic_complexity)] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -865,12 +866,9 @@ impl ChainSync { let mut added = 0usize; let mut data = Bytes::new(); for i in 0..count { - match io.chain().block_body(&try!(r.val_at::(i))) { - Some(mut hdr) => { - data.append(&mut hdr); - added += 1; - } - None => {} + if let Some(mut hdr) = io.chain().block_body(&try!(r.val_at::(i))) { + data.append(&mut hdr); + added += 1; } } let mut rlp = RlpStream::new_list(added); @@ -892,12 +890,9 @@ impl ChainSync { let mut added = 0usize; let mut data = Bytes::new(); for i in 0..count { - match io.chain().state_data(&try!(r.val_at::(i))) { - Some(mut hdr) => { - data.append(&mut hdr); - added += 1; - } - None => {} + if let Some(mut hdr) = io.chain().state_data(&try!(r.val_at::(i))) { + data.append(&mut hdr); + added += 1; } } let mut rlp = RlpStream::new_list(added); @@ -918,12 +913,9 @@ impl ChainSync { let mut added = 0usize; let mut data = Bytes::new(); for i in 0..count { - match io.chain().block_receipts(&try!(r.val_at::(i))) { - Some(mut hdr) => { - data.append(&mut hdr); - added += 1; - } - None => {} + if let Some(mut hdr) = io.chain().block_receipts(&try!(r.val_at::(i))) { + data.append(&mut hdr); + added += 1; } } let mut rlp = RlpStream::new_list(added); diff --git a/src/sync/io.rs b/src/sync/io.rs index affcbc0d7..aa572c133 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -14,7 +14,7 @@ pub trait SyncIo { /// Send a packet to a peer. fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError>; /// Get the blockchain - fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient; + fn chain(&mut self) -> &mut BlockChainClient; /// Returns peer client identifier string fn peer_info(&self, peer_id: PeerId) -> String { peer_id.to_string() @@ -50,7 +50,7 @@ impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { self.network.send(peer_id, packet_id, data) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain(&mut self) -> &mut BlockChainClient { self.chain } diff --git a/src/sync/range_collection.rs b/src/sync/range_collection.rs index d212625be..822056ff4 100644 --- a/src/sync/range_collection.rs +++ b/src/sync/range_collection.rs @@ -29,7 +29,7 @@ pub trait RangeCollection { /// Remove all elements >= `tail` fn insert_item(&mut self, key: K, value: V); /// Get an iterator over ranges - fn range_iter<'c>(&'c self) -> RangeIterator<'c, K, V>; + fn range_iter(& self) -> RangeIterator; } /// Range iterator. 
For each range yelds a key for the first element of the range and a vector of values. @@ -60,7 +60,7 @@ impl<'c, K:'c, V:'c> Iterator for RangeIterator<'c, K, V> where K: Add RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + Add + Sub + Copy + FromUsize + ToUsize { - fn range_iter<'c>(&'c self) -> RangeIterator<'c, K, V> { + fn range_iter(&self) -> RangeIterator { RangeIterator { range: self.len(), collection: self diff --git a/src/views.rs b/src/views.rs index 56e613f06..e1c704625 100644 --- a/src/views.rs +++ b/src/views.rs @@ -156,7 +156,7 @@ impl<'a> BlockView<'a> { /// Return List of transactions in given block. pub fn uncle_views(&self) -> Vec { - self.rlp.at(2).iter().map(|rlp| HeaderView::new_from_rlp(rlp)).collect() + self.rlp.at(2).iter().map(HeaderView::new_from_rlp).collect() } /// Return list of uncle hashes of given block. From fc4b67a12d04143ca49869f9c2f8afc7b8c102b6 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 11:15:39 +0100 Subject: [PATCH 003/138] Fixing self_convention errors --- src/ethereum/ethash.rs | 1 + src/executive.rs | 6 +++--- src/spec.rs | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index 6ab49c1ac..13d7b1af3 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -152,6 +152,7 @@ impl Engine for Ethash { } } +#[allow(wrong_self_convention)] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; diff --git a/src/executive.rs b/src/executive.rs index e0f35d712..7671f3cc9 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -75,7 +75,7 @@ impl<'a> Executive<'a> { } /// Creates `Externalities` from `Executive`. - pub fn to_externalities<'_>(&'_ mut self, origin_info: OriginInfo, substate: &'_ mut Substate, output: OutputPolicy<'_>) -> Externalities { + pub fn as_externalities<'_>(&'_ mut self, origin_info: OriginInfo, substate: &'_ mut Substate, output: OutputPolicy<'_>) -> Externalities { Externalities::new(self.state, self.info, self.engine, self.depth, origin_info, substate, output) } @@ -198,7 +198,7 @@ impl<'a> Executive<'a> { let mut unconfirmed_substate = Substate::new(); let res = { - let mut ext = self.to_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::Return(output)); + let mut ext = self.as_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::Return(output)); self.engine.vm_factory().create().exec(params, &mut ext) }; @@ -230,7 +230,7 @@ impl<'a> Executive<'a> { self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); let res = { - let mut ext = self.to_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::InitContract); + let mut ext = self.as_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::InitContract); self.engine.vm_factory().create().exec(params, &mut ext) }; self.enact_result(&res, substate, unconfirmed_substate, backup); diff --git a/src/spec.rs b/src/spec.rs index 363fd9a49..95f2d9a23 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -93,10 +93,10 @@ pub struct Spec { state_root_memo: RwLock>, } +#[allow(wrong_self_convention)] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
- #[allow(wrong_self_convention)] pub fn to_engine(self) -> Result, Error> { match self.engine_name.as_ref() { "NullEngine" => Ok(NullEngine::new_boxed(self)), From 062193ceb59fc3128a93f4503d2564e3b4a5657b Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 12:14:29 +0100 Subject: [PATCH 004/138] Fixing clippy errors in util --- README.md | 4 +++ util/src/bytes.rs | 14 +++++------ util/src/chainfilter.rs | 7 +++--- util/src/hash.rs | 26 +++++++++---------- util/src/io/service.rs | 12 ++++----- util/src/json_aid.rs | 22 ++++++++-------- util/src/lib.rs | 1 + util/src/misc.rs | 6 ++--- util/src/network/connection.rs | 9 +++---- util/src/network/discovery.rs | 13 +++++----- util/src/network/handshake.rs | 18 +++++-------- util/src/network/host.rs | 46 +++++++++++++++++----------------- util/src/network/node.rs | 6 +++-- util/src/network/session.rs | 4 +-- util/src/overlaydb.rs | 11 +++----- util/src/rlp/rlpstream.rs | 6 ++--- util/src/rlp/rlptraits.rs | 6 ++--- util/src/rlp/untrusted_rlp.rs | 9 ++++--- util/src/squeeze.rs | 4 +-- util/src/trie/triedb.rs | 12 ++++----- util/src/trie/triedbmut.rs | 15 ++++++----- util/src/uint.rs | 14 +++++------ 22 files changed, 128 insertions(+), 137 deletions(-) diff --git a/README.md b/README.md index 216ac8091..48172bb60 100644 --- a/README.md +++ b/README.md @@ -1 +1,5 @@ # ethcore + + +# Running clippy + diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 479a91df0..a954d8acd 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -99,18 +99,18 @@ impl<'a> Deref for BytesRef<'a> { type Target = [u8]; fn deref(&self) -> &[u8] { - match self { - &BytesRef::Flexible(ref bytes) => bytes, - &BytesRef::Fixed(ref bytes) => bytes + match *self { + BytesRef::Flexible(ref bytes) => bytes, + BytesRef::Fixed(ref bytes) => bytes } } } impl <'a> DerefMut for BytesRef<'a> { fn deref_mut(&mut self) -> &mut [u8] { - match self { - &mut BytesRef::Flexible(ref mut bytes) => bytes, - &mut BytesRef::Fixed(ref mut bytes) => bytes + match *self { + BytesRef::Flexible(ref mut bytes) => bytes, + BytesRef::Fixed(ref mut bytes) => bytes } } } @@ -283,7 +283,7 @@ pub trait FromBytes: Sized { impl FromBytes for String { fn from_bytes(bytes: &[u8]) -> FromBytesResult { - Ok(::std::str::from_utf8(bytes).unwrap().to_string()) + Ok(::std::str::from_utf8(bytes).unwrap().to_owned()) } } diff --git a/util/src/chainfilter.rs b/util/src/chainfilter.rs index e1804c191..386664837 100644 --- a/util/src/chainfilter.rs +++ b/util/src/chainfilter.rs @@ -321,10 +321,9 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource let offset = level_size * index; // go doooown! - match self.blocks(bloom, from_block, to_block, max_level, offset) { - Some(blocks) => result.extend(blocks), - None => () - }; + if let Some(blocks) = self.blocks(bloom, from_block, to_block, max_level, offset) { + result.extend(blocks); + } } result diff --git a/util/src/hash.rs b/util/src/hash.rs index 17057ef07..793924f8f 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -193,11 +193,11 @@ macro_rules! impl_hash { impl FromJson for $from { fn from_json(json: &Json) -> Self { - match json { - &Json::String(ref s) => { + match *json { + Json::String(ref s) => { match s.len() % 2 { 0 => FromStr::from_str(clean_0x(s)).unwrap(), - _ => FromStr::from_str(&("0".to_string() + &(clean_0x(s).to_string()))[..]).unwrap() + _ => FromStr::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap() } }, _ => Default::default(), @@ -207,7 +207,7 @@ macro_rules! 
impl_hash { impl fmt::Debug for $from { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for i in self.0.iter() { + for i in &self.0[..] { try!(write!(f, "{:02x}", i)); } Ok(()) @@ -215,11 +215,11 @@ macro_rules! impl_hash { } impl fmt::Display for $from { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for i in self.0[0..2].iter() { + for i in &self.0[0..2] { try!(write!(f, "{:02x}", i)); } try!(write!(f, "…")); - for i in self.0[$size - 4..$size].iter() { + for i in &self.0[$size - 4..$size] { try!(write!(f, "{:02x}", i)); } Ok(()) @@ -277,36 +277,36 @@ macro_rules! impl_hash { impl Index for $from { type Output = u8; - fn index<'a>(&'a self, index: usize) -> &'a u8 { + fn index(&self, index: usize) -> &u8 { &self.0[index] } } impl IndexMut for $from { - fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut u8 { + fn index_mut(&mut self, index: usize) -> &mut u8 { &mut self.0[index] } } impl Index> for $from { type Output = [u8]; - fn index<'a>(&'a self, index: ops::Range) -> &'a [u8] { + fn index(&self, index: ops::Range) -> &[u8] { &self.0[index] } } impl IndexMut> for $from { - fn index_mut<'a>(&'a mut self, index: ops::Range) -> &'a mut [u8] { + fn index_mut(&mut self, index: ops::Range) -> &mut [u8] { &mut self.0[index] } } impl Index for $from { type Output = [u8]; - fn index<'a>(&'a self, _index: ops::RangeFull) -> &'a [u8] { + fn index(&self, _index: ops::RangeFull) -> &[u8] { &self.0 } } impl IndexMut for $from { - fn index_mut<'a>(&'a mut self, _index: ops::RangeFull) -> &'a mut [u8] { + fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [u8] { &mut self.0 } } @@ -424,7 +424,7 @@ macro_rules! impl_hash { fn from(s: &'_ str) -> $from { use std::str::FromStr; if s.len() % 2 == 1 { - $from::from_str(&("0".to_string() + &(clean_0x(s).to_string()))[..]).unwrap_or($from::new()) + $from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or($from::new()) } else { $from::from_str(clean_0x(s)).unwrap_or($from::new()) } diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 4a96d19a7..537f26b8f 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -93,17 +93,17 @@ impl Handler for IoManager where Message: Send + 'static { fn ready(&mut self, event_loop: &mut EventLoop, token: Token, events: EventSet) { if events.is_hup() { - for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.stream_hup(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); } } else if events.is_readable() { - for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.stream_readable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); } } else if events.is_writable() { - for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.stream_writable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); } } @@ -116,13 +116,13 @@ impl Handler for IoManager where Message: Send + 'static { let timer = self.timers.get_mut(token).expect("Unknown user timer token"); timer.delay }; - for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); } event_loop.timeout_ms(token, delay).expect("Error re-registering user timer"); } _ => { // Just pass the event down. IoHandler is supposed to re-register it if required. 
- for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); } } @@ -140,7 +140,7 @@ impl Handler for IoManager where Message: Send + 'static { self.handlers.last_mut().unwrap().initialize(&mut IoContext::new(event_loop, &mut self.timers)); }, IoMessage::UserMessage(ref mut data) => { - for h in self.handlers.iter_mut() { + for h in &mut self.handlers { h.message(&mut IoContext::new(event_loop, &mut self.timers), data); } } diff --git a/util/src/json_aid.rs b/util/src/json_aid.rs index 79a71cac6..417017c31 100644 --- a/util/src/json_aid.rs +++ b/util/src/json_aid.rs @@ -18,10 +18,10 @@ fn u256_from_str(s: &str) -> U256 { impl FromJson for Bytes { fn from_json(json: &Json) -> Self { - match json { - &Json::String(ref s) => match s.len() % 2 { + match *json { + Json::String(ref s) => match s.len() % 2 { 0 => FromHex::from_hex(clean(s)).unwrap_or(vec![]), - _ => FromHex::from_hex(&("0".to_string() + &(clean(s).to_string()))[..]).unwrap_or(vec![]), + _ => FromHex::from_hex(&("0".to_owned() + &(clean(s).to_owned()))[..]).unwrap_or(vec![]), }, _ => vec![], } @@ -30,8 +30,8 @@ impl FromJson for Bytes { impl FromJson for BTreeMap { fn from_json(json: &Json) -> Self { - match json { - &Json::Object(ref o) => o.iter().map(|(key, value)| (x!(&u256_from_str(key)), x!(&U256::from_json(value)))).collect(), + match *json { + Json::Object(ref o) => o.iter().map(|(key, value)| (x!(&u256_from_str(key)), x!(&U256::from_json(value)))).collect(), _ => BTreeMap::new(), } } @@ -39,8 +39,8 @@ impl FromJson for BTreeMap { impl FromJson for Vec where T: FromJson { fn from_json(json: &Json) -> Self { - match json { - &Json::Array(ref o) => o.iter().map(|x|T::from_json(x)).collect(), + match *json { + Json::Array(ref o) => o.iter().map(|x|T::from_json(x)).collect(), _ => Vec::new(), } } @@ -48,9 +48,9 @@ impl FromJson for Vec where T: FromJson { impl FromJson for Option where T: FromJson { fn from_json(json: &Json) -> Self { - match json { - &Json::String(ref o) if o.is_empty() => None, - &Json::Null => None, + match *json { + Json::String(ref o) if o.is_empty() => None, + Json::Null => None, _ => Some(FromJson::from_json(json)), } } @@ -134,4 +134,4 @@ fn option_types() { assert_eq!(None, v); let v: Option = xjson!(&j["empty"]); assert_eq!(None, v); -} \ No newline at end of file +} diff --git a/util/src/lib.rs b/util/src/lib.rs index 4bc47e61c..ffa091c37 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -2,6 +2,7 @@ #![feature(augmented_assignments)] #![feature(associated_consts)] #![feature(wrapping)] +#![allow(needless_range_loop, match_bool)] //! Ethcore-util library //! //! ### Rust version: diff --git a/util/src/misc.rs b/util/src/misc.rs index e5efd33bb..d540ccf5b 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -14,13 +14,13 @@ impl Diff where T: Eq { pub fn new(pre: T, post: T) -> Self { if pre == post { Diff::Same } else { Diff::Changed(pre, post) } } /// Get the before value, if there is one. - pub fn pre(&self) -> Option<&T> { match self { &Diff::Died(ref x) | &Diff::Changed(ref x, _) => Some(x), _ => None } } + pub fn pre(&self) -> Option<&T> { match *self { Diff::Died(ref x) | Diff::Changed(ref x, _) => Some(x), _ => None } } /// Get the after value, if there is one. 
- pub fn post(&self) -> Option<&T> { match self { &Diff::Born(ref x) | &Diff::Changed(_, ref x) => Some(x), _ => None } } + pub fn post(&self) -> Option<&T> { match *self { Diff::Born(ref x) | Diff::Changed(_, ref x) => Some(x), _ => None } } /// Determine whether there was a change or not. - pub fn is_same(&self) -> bool { match self { &Diff::Same => true, _ => false }} + pub fn is_same(&self) -> bool { match *self { Diff::Same => true, _ => false }} } #[derive(PartialEq,Eq,Clone,Copy)] diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index f11c10384..6127cf838 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -86,7 +86,7 @@ impl Connection { /// Add a packet to send queue. pub fn send(&mut self, data: Bytes) { - if data.len() != 0 { + if !data.is_empty() { self.send_queue.push_back(Cursor::new(data)); } if !self.interest.is_writable() { @@ -341,11 +341,8 @@ impl EncryptedConnection { self.idle_timeout.map(|t| event_loop.clear_timeout(t)); match self.read_state { EncryptedConnectionState::Header => { - match try!(self.connection.readable()) { - Some(data) => { - try!(self.read_header(&data)); - }, - None => {} + if let Some(data) = try!(self.connection.readable()) { + try!(self.read_header(&data)); }; Ok(None) }, diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index d2a4f21a2..1f2bd4e06 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -62,7 +62,7 @@ impl Discovery { discovery_round: 0, discovery_id: NodeId::new(), discovery_nodes: HashSet::new(), - node_buckets: (0..NODE_BINS).map(|x| NodeBucket::new(x)).collect(), + node_buckets: (0..NODE_BINS).map(NodeBucket::new).collect(), } } @@ -122,7 +122,8 @@ impl Discovery { ret } - fn nearest_node_entries<'b>(source: &NodeId, target: &NodeId, buckets: &'b Vec) -> Vec<&'b NodeId> + #[allow(cyclomatic_complexity)] + fn nearest_node_entries<'b>(source: &NodeId, target: &NodeId, buckets: &'b [NodeBucket]) -> Vec<&'b NodeId> { // send ALPHA FindNode packets to nodes we know, closest to target const LAST_BIN: u32 = NODE_BINS - 1; @@ -136,7 +137,7 @@ impl Discovery { if head > 1 && tail != LAST_BIN { while head != tail && head < NODE_BINS && count < BUCKET_SIZE { - for n in buckets[head as usize].nodes.iter() + for n in &buckets[head as usize].nodes { if count < BUCKET_SIZE { count += 1; @@ -147,7 +148,7 @@ impl Discovery { } } if count < BUCKET_SIZE && tail != 0 { - for n in buckets[tail as usize].nodes.iter() { + for n in &buckets[tail as usize].nodes { if count < BUCKET_SIZE { count += 1; found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n); @@ -166,7 +167,7 @@ impl Discovery { } else if head < 2 { while head < NODE_BINS && count < BUCKET_SIZE { - for n in buckets[head as usize].nodes.iter() { + for n in &buckets[head as usize].nodes { if count < BUCKET_SIZE { count += 1; found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n); @@ -180,7 +181,7 @@ impl Discovery { } else { while tail > 0 && count < BUCKET_SIZE { - for n in buckets[tail as usize].nodes.iter() { + for n in &buckets[tail as usize].nodes { if count < BUCKET_SIZE { count += 1; found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n); diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index ca95808b4..e7a669e25 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -93,21 +93,15 @@ impl Handshake { self.idle_timeout.map(|t| event_loop.clear_timeout(t)); match 
self.state { HandshakeState::ReadingAuth => { - match try!(self.connection.readable()) { - Some(data) => { - try!(self.read_auth(host, &data)); - try!(self.write_ack()); - }, - None => {} + if let Some(data) = try!(self.connection.readable()) { + try!(self.read_auth(host, &data)); + try!(self.write_ack()); }; }, HandshakeState::ReadingAck => { - match try!(self.connection.readable()) { - Some(data) => { - try!(self.read_ack(host, &data)); - self.state = HandshakeState::StartSession; - }, - None => {} + if let Some(data) = try!(self.connection.readable()) { + try!(self.read_ack(host, &data)); + self.state = HandshakeState::StartSession; }; }, _ => { panic!("Unexpected state"); } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 37b58f1f0..060fd5217 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -175,7 +175,7 @@ impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 's s.info.client_version.clone() }, _ => { - "unknown".to_string() + "unknown".to_owned() } } } @@ -213,7 +213,7 @@ impl HostInfo { /// Increments and returns connection nonce. pub fn next_nonce(&mut self) -> H256 { self.nonce = self.nonce.sha3(); - return self.nonce.clone(); + self.nonce.clone() } } @@ -246,7 +246,7 @@ impl Host where Message: Send { config: config, nonce: H256::random(), protocol_version: 4, - client_version: "parity".to_string(), + client_version: "parity".to_owned(), listen_port: 0, capabilities: Vec::new(), }, @@ -274,11 +274,11 @@ impl Host where Message: Send { } fn have_session(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) + self.connections.iter().any(|e| match *e { ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) } fn connecting_to(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) + self.connections.iter().any(|e| match *e { ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) } fn connect_peers(&mut self, io: &mut IoContext>) { @@ -303,7 +303,7 @@ impl Host where Message: Send { } } - for n in to_connect.iter() { + for n in &to_connect { if n.peer_type == PeerType::Required { if req_conn < IDEAL_PEERS { self.connect_peer(&n.id, io); @@ -318,7 +318,7 @@ impl Host where Message: Send { let peer_count = 0; let mut open_slots = IDEAL_PEERS - peer_count - pending_count + req_conn; if open_slots > 0 { - for n in to_connect.iter() { + for n in &to_connect { if n.peer_type == PeerType::Optional && open_slots > 0 { open_slots -= 1; self.connect_peer(&n.id, io); @@ -328,6 +328,7 @@ impl Host where Message: Send { } } + #[allow(single_match)] fn connect_peer(&mut self, id: &NodeId, io: &mut IoContext>) { if self.have_session(id) { @@ -376,6 +377,7 @@ impl Host where Message: Send { trace!(target: "net", "accept"); } + #[allow(single_match)] fn connection_writable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { let mut kill = false; let mut create_session = false; @@ -436,7 +438,7 @@ impl Host where Message: Send { }) }; match sd { SessionData::Ready => { - for (p, _) in self.handlers.iter_mut() { + for (p, _) in &mut self.handlers { if s.have_capability(p) { ready_data.push(p); } @@ -475,11 +477,8 @@ impl Host where Message: Send { h.read(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token, packet_id, &data[1..]); } - match self.connections.get_mut(token) { 
- Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); - }, - _ => (), + if let Some(&mut ConnectionEntry::Session(ref mut s)) = self.connections.get_mut(token) { + s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); } } @@ -523,7 +522,7 @@ impl Host where Message: Send { match self.connections.get_mut(token) { Some(&mut ConnectionEntry::Handshake(_)) => (), // just abandon handshake Some(&mut ConnectionEntry::Session(ref mut s)) if s.is_ready() => { - for (p, _) in self.handlers.iter_mut() { + for (p, _) in &mut self.handlers { if s.have_capability(p) { to_disconnect.push(p); } @@ -600,19 +599,20 @@ impl IoHandler> for Host where Messa FIRST_CONNECTION ... LAST_CONNECTION => self.connection_timeout(token, io), NODETABLE_DISCOVERY => {}, NODETABLE_MAINTAIN => {}, - _ => match self.timers.get_mut(&token).map(|p| *p) { - Some(protocol) => match self.handlers.get_mut(protocol) { + _ => { + if let Some(protocol) = self.timers.get_mut(&token).map(|p| *p) { + match self.handlers.get_mut(protocol) { None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, Some(h) => { h.timeout(&mut NetworkContext::new(io, protocol, Some(token), &mut self.connections, &mut self.timers), token); } - }, - None => {} // time not registerd through us + }; + } // else time not registerd through us } } } fn message<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, message: &'s mut NetworkIoMessage) { - match message { - &mut NetworkIoMessage::AddHandler { + match *message { + NetworkIoMessage::AddHandler { ref mut handler, ref protocol, ref versions @@ -624,7 +624,7 @@ impl IoHandler> for Host where Messa self.info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); } }, - &mut NetworkIoMessage::Send { + NetworkIoMessage::Send { ref peer, ref packet_id, ref protocol, @@ -641,8 +641,8 @@ impl IoHandler> for Host where Messa } } }, - &mut NetworkIoMessage::User(ref message) => { - for (p, h) in self.handlers.iter_mut() { + NetworkIoMessage::User(ref message) => { + for (p, h) in &mut self.handlers { h.message(&mut NetworkContext::new(io, p, None, &mut self.connections, &mut self.timers), &message); } } diff --git a/util/src/network/node.rs b/util/src/network/node.rs index 5c08e0a66..fd27e58e2 100644 --- a/util/src/network/node.rs +++ b/util/src/network/node.rs @@ -20,14 +20,16 @@ pub struct NodeEndpoint { pub udp_port: u16 } -impl NodeEndpoint { +impl FromStr for NodeEndpoint { + type Err = UtilError; + /// Create endpoint from string. Performs name resolution if given a host name. 
fn from_str(s: &str) -> Result { let address = s.to_socket_addrs().map(|mut i| i.next()); match address { Ok(Some(a)) => Ok(NodeEndpoint { address: a, - address_str: s.to_string(), + address_str: s.to_owned(), udp_port: a.port() }), Ok(_) => Err(UtilError::AddressResolve(None)), diff --git a/util/src/network/session.rs b/util/src/network/session.rs index 828e4b062..d20448fdd 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -182,7 +182,7 @@ impl Session { // map to protocol let protocol = self.info.capabilities[i].protocol; let pid = packet_id - self.info.capabilities[i].id_offset; - return Ok(SessionData::Packet { data: packet.data, protocol: protocol, packet_id: pid } ) + Ok(SessionData::Packet { data: packet.data, protocol: protocol, packet_id: pid } ) }, _ => { debug!(target: "net", "Unkown packet: {:?}", packet_id); @@ -212,7 +212,7 @@ impl Session { // Intersect with host capabilities // Leave only highset mutually supported capability version let mut caps: Vec = Vec::new(); - for hc in host.capabilities.iter() { + for hc in &host.capabilities { if peer_caps.iter().any(|c| c.protocol == hc.protocol && c.version == hc.version) { caps.push(SessionCapabilityInfo { protocol: hc.protocol, diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 1006cd28c..0bda2660d 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -159,7 +159,7 @@ impl HashDB for OverlayDB { match k { Some(&(ref d, rc)) if rc > 0 => Some(d), _ => { - let memrc = k.map(|&(_, rc)| rc).unwrap_or(0); + let memrc = k.map_or(0, |&(_, rc)| rc); match self.payload(key) { Some(x) => { let (d, rc) = x; @@ -184,16 +184,11 @@ impl HashDB for OverlayDB { match k { Some(&(_, rc)) if rc > 0 => true, _ => { - let memrc = k.map(|&(_, rc)| rc).unwrap_or(0); + let memrc = k.map_or(0, |&(_, rc)| rc); match self.payload(key) { Some(x) => { let (_, rc) = x; - if rc as i32 + memrc > 0 { - true - } - else { - false - } + rc as i32 + memrc > 0 } // Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done. 
//Some((d, rc)) if rc + memrc > 0 => true, diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index b8954ae6f..1487e3912 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -41,7 +41,7 @@ impl Stream for RlpStream { stream } - fn append<'a, E>(&'a mut self, object: &E) -> &'a mut RlpStream where E: Encodable { + fn append(&mut self, object: &E) -> &mut RlpStream where E: Encodable { // encode given value and add it at the end of the stream object.encode(&mut self.encoder); @@ -52,7 +52,7 @@ impl Stream for RlpStream { self } - fn append_list<'a>(&'a mut self, len: usize) -> &'a mut RlpStream { + fn append_list(&mut self, len: usize) -> &mut RlpStream { match len { 0 => { // we may finish, if the appended list len is equal 0 @@ -69,7 +69,7 @@ impl Stream for RlpStream { self } - fn append_empty_data<'a>(&'a mut self) -> &'a mut RlpStream { + fn append_empty_data(&mut self) -> &mut RlpStream { // self push raw item self.encoder.bytes.push(0x80); diff --git a/util/src/rlp/rlptraits.rs b/util/src/rlp/rlptraits.rs index 407d62daf..162f990fe 100644 --- a/util/src/rlp/rlptraits.rs +++ b/util/src/rlp/rlptraits.rs @@ -5,7 +5,7 @@ pub trait Decoder: Sized { where F: FnOnce(&[u8]) -> Result; fn as_list(&self) -> Result, DecoderError>; - fn as_rlp<'a>(&'a self) -> &'a UntrustedRlp<'a>; + fn as_rlp(&self) -> &UntrustedRlp; fn as_raw(&self) -> &[u8]; } @@ -231,7 +231,7 @@ pub trait Stream: Sized { /// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]); /// } /// ``` - fn append_list<'a>(&'a mut self, len: usize) -> &'a mut Self; + fn append_list(&mut self, len: usize) -> &mut Self; /// Apends null to the end of stream, chainable. /// @@ -246,7 +246,7 @@ pub trait Stream: Sized { /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); /// } /// ``` - fn append_empty_data<'a>(&'a mut self) -> &'a mut Self; + fn append_empty_data(&mut self) -> &mut Self; /// Appends raw (pre-serialised) RLP data. Use with caution. Chainable. fn append_raw<'a>(&'a mut self, bytes: &[u8], item_count: usize) -> &'a mut Self; diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 2bf33ba68..9c9246115 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -282,7 +282,7 @@ impl<'a> BasicDecoder<'a> { /// Return first item info fn payload_info(bytes: &[u8]) -> Result { - let item = match bytes.first().map(|&x| x) { + let item = match bytes.first().cloned() { None => return Err(DecoderError::RlpIsTooShort), Some(0...0x7f) => PayloadInfo::new(0, 1), Some(l @ 0x80...0xb7) => PayloadInfo::new(1, l as usize - 0x80), @@ -318,7 +318,7 @@ impl<'a> Decoder for BasicDecoder<'a> { let bytes = self.rlp.as_raw(); - match bytes.first().map(|&x| x) { + match bytes.first().cloned() { // rlp is too short None => Err(DecoderError::RlpIsTooShort), // single byt value @@ -349,12 +349,12 @@ impl<'a> Decoder for BasicDecoder<'a> { fn as_list(&self) -> Result, DecoderError> { let v: Vec> = self.rlp.iter() - .map(| i | BasicDecoder::new(i)) + .map(BasicDecoder::new) .collect(); Ok(v) } - fn as_rlp<'s>(&'s self) -> &'s UntrustedRlp<'s> { + fn as_rlp(&self) -> &UntrustedRlp { &self.rlp } } @@ -399,6 +399,7 @@ impl Decodable for Option where T: Decodable { macro_rules! 
impl_array_decodable { ($index_type:ty, $len:expr ) => ( impl Decodable for [T; $len] where T: Decodable { + #[allow(len_zero)] fn decode(decoder: &D) -> Result where D: Decoder { let decoders = try!(decoder.as_list()); diff --git a/util/src/squeeze.rs b/util/src/squeeze.rs index e81a13793..f83c5cee9 100644 --- a/util/src/squeeze.rs +++ b/util/src/squeeze.rs @@ -41,7 +41,7 @@ pub trait Squeeze { impl Squeeze for HashMap where K: Eq + Hash + Clone + HeapSizeOf, T: HeapSizeOf { fn squeeze(&mut self, size: usize) { - if self.len() == 0 { + if self.is_empty() { return } @@ -49,7 +49,7 @@ impl Squeeze for HashMap where K: Eq + Hash + Clone + HeapSizeOf, T: let all_entries = size_of_entry * self.len(); let mut shrinked_size = all_entries; - while self.len() > 0 && shrinked_size > size { + while !self.is_empty() && shrinked_size > size { // could be optimized let key = self.keys().next().unwrap().clone(); self.remove(&key); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 9e4cf36e2..612bcdf7a 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -37,6 +37,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } +#[allow(wrong_self_convention)] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist @@ -102,7 +103,7 @@ impl<'db> TrieDB<'db> { match node { Node::Extension(_, payload) => handle_payload(payload), - Node::Branch(payloads, _) => for payload in payloads.iter() { handle_payload(payload) }, + Node::Branch(payloads, _) => for payload in &payloads { handle_payload(payload) }, _ => {}, } } @@ -140,12 +141,9 @@ impl<'db> TrieDB<'db> { }, Node::Branch(ref nodes, ref value) => { try!(writeln!(f, "")); - match value { - &Some(v) => { - try!(self.fmt_indent(f, deepness + 1)); - try!(writeln!(f, "=: {:?}", v.pretty())) - }, - &None => {} + if let Some(v) = *value { + try!(self.fmt_indent(f, deepness + 1)); + try!(writeln!(f, "=: {:?}", v.pretty())) } for i in 0..16 { match self.get_node(nodes[i]) { diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 0f3dde4fb..bd39f87ae 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -49,6 +49,7 @@ enum MaybeChanged<'a> { Changed(Bytes), } +#[allow(wrong_self_convention)] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. @@ -144,7 +145,7 @@ impl<'db> TrieDBMut<'db> { match node { Node::Extension(_, payload) => handle_payload(payload), - Node::Branch(payloads, _) => for payload in payloads.iter() { handle_payload(payload) }, + Node::Branch(payloads, _) => for payload in &payloads { handle_payload(payload) }, _ => {}, } } @@ -177,12 +178,9 @@ impl<'db> TrieDBMut<'db> { }, Node::Branch(ref nodes, ref value) => { try!(writeln!(f, "")); - match value { - &Some(v) => { - try!(self.fmt_indent(f, deepness + 1)); - try!(writeln!(f, "=: {:?}", v.pretty())) - }, - &None => {} + if let Some(v) = *value { + try!(self.fmt_indent(f, deepness + 1)); + try!(writeln!(f, "=: {:?}", v.pretty())) } for i in 0..16 { match self.get_node(nodes[i]) { @@ -330,6 +328,7 @@ impl<'db> TrieDBMut<'db> { } } + #[allow(cyclomatic_complexity)] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node. 
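These hunks keep applying the same handful of clippy rewrites seen throughout the series: map(..).unwrap_or(..) collapses into map_or(..), one-armed match statements become if let, and loops borrow the collection instead of calling .iter(). A minimal, standalone sketch of those idioms (the names `entries` and `maybe_len` are illustrative only, not project code):

fn main() {
    let entries = vec![("cat", 3usize), ("dog", 3)];

    // iterate by borrowing the collection instead of calling .iter()
    for &(name, len) in &entries {
        println!("{}: {}", name, len);
    }

    // fold map() + unwrap_or() into a single map_or()
    let maybe_len: Option<usize> = entries.first().map(|&(_, l)| l);
    let first_len = maybe_len.map_or(0, |l| l);

    // a single-arm match reads better as if let
    if let Some(l) = maybe_len {
        assert_eq!(l, first_len);
    }
}
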
@@ -704,7 +703,7 @@ mod tests { } fn unpopulate_trie<'a, 'db>(t: &mut TrieDBMut<'db>, v: &Vec<(Vec, Vec)>) { - for i in v.iter() { + for i in &v { let key: &[u8]= &i.0; t.remove(&key); } diff --git a/util/src/uint.rs b/util/src/uint.rs index ec70cddb2..3562a2b81 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -174,7 +174,7 @@ macro_rules! construct_uint { #[inline] fn byte(&self, index: usize) -> u8 { let &$name(ref arr) = self; - (arr[index / 8] >> ((index % 8)) * 8) as u8 + (arr[index / 8] >> (((index % 8)) * 8)) as u8 } fn to_bytes(&self, bytes: &mut[u8]) { @@ -328,16 +328,16 @@ macro_rules! construct_uint { impl FromJson for $name { fn from_json(json: &Json) -> Self { - match json { - &Json::String(ref s) => { + match *json { + Json::String(ref s) => { if s.len() >= 2 && &s[0..2] == "0x" { FromStr::from_str(&s[2..]).unwrap_or(Default::default()) } else { Uint::from_dec_str(s).unwrap_or(Default::default()) } }, - &Json::U64(u) => From::from(u), - &Json::I64(i) => From::from(i as u64), + Json::U64(u) => From::from(u), + Json::I64(i) => From::from(i as u64), _ => Uint::zero(), } } @@ -370,7 +370,7 @@ macro_rules! construct_uint { for i in 0..bytes.len() { let rev = bytes.len() - 1 - i; let pos = rev / 8; - ret[pos] += (bytes[i] as u64) << (rev % 8) * 8; + ret[pos] += (bytes[i] as u64) << ((rev % 8) * 8); } $name(ret) } @@ -382,7 +382,7 @@ macro_rules! construct_uint { fn from_str(value: &str) -> Result<$name, Self::Err> { let bytes: Vec = match value.len() % 2 == 0 { true => try!(value.from_hex()), - false => try!(("0".to_string() + value).from_hex()) + false => try!(("0".to_owned() + value).from_hex()) }; let bytes_ref: &[u8] = &bytes; From c746f0e62cb31a7af9e61669141be7ed32692f0b Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 13:47:30 +0100 Subject: [PATCH 005/138] Fixing all clippy lints --- Cargo.toml | 1 + src/account.rs | 4 ++-- src/account_diff.rs | 4 ++-- src/blockchain.rs | 3 ++- src/evm/interpreter.rs | 8 ++++---- src/evm/tests.rs | 6 +++--- src/externalities.rs | 2 +- src/lib.rs | 5 +++-- src/pod_state.rs | 8 ++++---- src/spec.rs | 2 +- src/sync/range_collection.rs | 1 + src/sync/tests.rs | 17 +++++++---------- src/tests/executive.rs | 6 +++--- src/tests/state.rs | 2 +- src/tests/test_common.rs | 4 ++-- src/tests/transaction.rs | 8 ++++---- src/verification.rs | 8 ++++---- util/Cargo.toml | 1 + util/src/hash.rs | 5 +++-- util/src/journaldb.rs | 2 +- util/src/json_aid.rs | 8 ++++---- util/src/lib.rs | 2 ++ util/src/rlp/tests.rs | 28 ++++++++++++++-------------- util/src/trie/triedbmut.rs | 14 +++++++------- util/src/uint.rs | 5 +++-- 25 files changed, 80 insertions(+), 74 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0583aa78f..a01bfd594 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" +clippy = "*" [features] jit = ["evmjit"] diff --git a/src/account.rs b/src/account.rs index c572b5589..b0fbf3f85 100644 --- a/src/account.rs +++ b/src/account.rs @@ -248,8 +248,8 @@ mod tests { let a = Account::from_rlp(&rlp); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); - assert_eq!(a.storage_at(&mut db, &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); - assert_eq!(a.storage_at(&mut db, &H256::from(&U256::from(0x01u64))), H256::new()); + assert_eq!(a.storage_at(&db, &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); + 
assert_eq!(a.storage_at(&db, &H256::from(&U256::from(0x01u64))), H256::new()); } #[test] diff --git a/src/account_diff.rs b/src/account_diff.rs index fece8c5d7..3b01da522 100644 --- a/src/account_diff.rs +++ b/src/account_diff.rs @@ -62,8 +62,8 @@ impl AccountDiff { code: Diff::new(pre.code.clone(), post.code.clone()), storage: storage.into_iter().map(|k| (k.clone(), Diff::new( - pre.storage.get(&k).cloned().unwrap_or(H256::new()), - post.storage.get(&k).cloned().unwrap_or(H256::new()) + pre.storage.get(&k).cloned().unwrap_or_else(H256::new), + post.storage.get(&k).cloned().unwrap_or_else(H256::new) ))).collect(), }; if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() { diff --git a/src/blockchain.rs b/src/blockchain.rs index 6700391ad..516f18230 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -179,7 +179,7 @@ impl BlockProvider for BlockChain { const COLLECTION_QUEUE_SIZE: usize = 2; const MIN_CACHE_SIZE: usize = 1; -const MAX_CACHE_SIZE: usize = 1024 * 1024 * 1; +const MAX_CACHE_SIZE: usize = 1024 * 1024; impl BlockChain { /// Create new instance of blockchain from given Genesis @@ -659,6 +659,7 @@ mod tests { } #[test] + #[allow(cyclomatic_complexity)] fn test_small_fork() { let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a059262c330941f3fe2a34d16d6e3c7b30d2ceb37c6a0e9a994c494ee1a61d2410885aa4c8bf8e56e264c0c0".from_hex().unwrap(); let b1 = 
"f90261f901f9a05716670833ec874362d65fea27a7cd35af5897d275b31a44944113111e4e96d2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb52de543653d86ccd13ba3ddf8b052525b04231c6884a4db3188a184681d878a0e78628dd45a1f8dc495594d83b76c588a3ee67463260f8b7d4a42f574aeab29aa0e9244cf7503b79c03d3a099e07a80d2dbc77bb0b502d8a89d51ac0d68dd31313b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd882520884562791e580a051b3ecba4e3f2b49c11d42dd0851ec514b1be3138080f72a2b6e83868275d98f8877671f479c414b47f862f86080018304cb2f94095e7baea6a6c7c4c2dfeb977efac326af552d870a801ca09e2709d7ec9bbe6b1bbbf0b2088828d14cd5e8642a1fee22dc74bfa89761a7f9a04bd8813dee4be989accdb708b1c2e325a7e9c695a8024e30e89d6c644e424747c0".from_hex().unwrap(); diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index a3c8f6802..e21074531 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -718,7 +718,7 @@ impl Interpreter { let big_id = stack.pop_back(); let id = big_id.low_u64() as usize; let max = id.wrapping_add(32); - let data = params.data.clone().unwrap_or(vec![]); + let data = params.data.clone().unwrap_or_else(|| vec![]); let bound = cmp::min(data.len(), max); if id < bound && big_id < U256::from(data.len()) { let mut v = data[id..bound].to_vec(); @@ -729,7 +729,7 @@ impl Interpreter { } }, instructions::CALLDATASIZE => { - stack.push(U256::from(params.data.clone().unwrap_or(vec![]).len())); + stack.push(U256::from(params.data.clone().map_or(0, |l| l.len()))); }, instructions::CODESIZE => { stack.push(U256::from(code.len())); @@ -740,10 +740,10 @@ impl Interpreter { stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { - self.copy_data_to_memory(mem, stack, ¶ms.data.clone().unwrap_or(vec![])); + self.copy_data_to_memory(mem, stack, ¶ms.data.clone().unwrap_or_else(|| vec![])); }, instructions::CODECOPY => { - self.copy_data_to_memory(mem, stack, ¶ms.code.clone().unwrap_or(vec![])); + self.copy_data_to_memory(mem, stack, ¶ms.code.clone().unwrap_or_else(|| vec![])); }, instructions::EXTCODECOPY => { let address = u256_to_address(&stack.pop_back()); diff --git a/src/evm/tests.rs b/src/evm/tests.rs index 8e1b5eff4..ef5987285 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -19,7 +19,7 @@ struct FakeExt { logs: Vec, _suicides: HashSet
, info: EnvInfo, - _schedule: Schedule + schedule: Schedule } impl FakeExt { @@ -89,7 +89,7 @@ impl Ext for FakeExt { } fn schedule(&self) -> &Schedule { - &self._schedule + &self.schedule } fn env_info(&self) -> &EnvInfo { @@ -122,7 +122,7 @@ fn test_stack_underflow() { }; match err { - evm::Error::StackUnderflow {instruction: _, wanted, on_stack} => { + evm::Error::StackUnderflow {wanted, on_stack, ..} => { assert_eq!(wanted, 2); assert_eq!(on_stack, 0); } diff --git a/src/externalities.rs b/src/externalities.rs index 9c07f005c..b2d716d37 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -158,7 +158,7 @@ impl<'a> Ext for Externalities<'a> { } fn extcode(&self, address: &Address) -> Bytes { - self.state.code(address).unwrap_or(vec![]) + self.state.code(address).unwrap_or_else(|| vec![]) } #[allow(match_ref_pats)] diff --git a/src/lib.rs b/src/lib.rs index 99a300924..f9795f324 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,8 +1,9 @@ #![feature(cell_extras)] #![feature(augmented_assignments)] -//#![feature(plugin)] +#![feature(plugin)] //#![plugin(interpolate_idents)] -#![allow(match_bool, needless_range_loop)] +#![plugin(clippy)] +#![allow(needless_range_loop, match_bool)] //! Ethcore's ethereum implementation //! diff --git a/src/pod_state.rs b/src/pod_state.rs index e3802c42a..68f057d9c 100644 --- a/src/pod_state.rs +++ b/src/pod_state.rs @@ -25,10 +25,10 @@ impl FromJson for PodState { let code = acc.find("code").map(&Bytes::from_json); if balance.is_some() || nonce.is_some() || storage.is_some() || code.is_some() { state.insert(address_from_hex(address), PodAccount{ - balance: balance.unwrap_or(U256::zero()), - nonce: nonce.unwrap_or(U256::zero()), - storage: storage.unwrap_or(BTreeMap::new()), - code: code.unwrap_or(Vec::new()) + balance: balance.unwrap_or_else(U256::zero), + nonce: nonce.unwrap_or_else(U256::zero), + storage: storage.unwrap_or_else(BTreeMap::new), + code: code.unwrap_or_else(Vec::new) }); } state diff --git a/src/spec.rs b/src/spec.rs index 4125191b5..2c112268f 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -176,7 +176,7 @@ impl FromJson for Spec { // let nonce = if let Some(&Json::String(ref n)) = acc.find("nonce") {U256::from_dec_str(n).unwrap_or(U256::from(0))} else {U256::from(0)}; // TODO: handle code & data if they exist. 
if balance.is_some() || nonce.is_some() { - state.insert(addr, GenesisAccount { balance: balance.unwrap_or(U256::from(0)), nonce: nonce.unwrap_or(U256::from(0)) }); + state.insert(addr, GenesisAccount { balance: balance.unwrap_or_else(U256::zero), nonce: nonce.unwrap_or_else(U256::zero) }); } } } diff --git a/src/sync/range_collection.rs b/src/sync/range_collection.rs index 822056ff4..b8186e5a5 100644 --- a/src/sync/range_collection.rs +++ b/src/sync/range_collection.rs @@ -191,6 +191,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } #[test] +#[allow(cyclomatic_complexity)] fn test_range() { use std::cmp::{Ordering}; diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 05d7ac317..72c207f10 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -64,7 +64,7 @@ impl BlockChainClient for TestBlockChainClient { } fn block(&self, h: &H256) -> Option { - self.blocks.get(h).map(|b| b.clone()) + self.blocks.get(h).cloned() } fn block_status(&self, h: &H256) -> BlockStatus { @@ -208,7 +208,7 @@ impl<'p> SyncIo for TestIo<'p> { Ok(()) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain(&mut self) -> &mut BlockChainClient { self.chain } } @@ -265,14 +265,11 @@ impl TestNet { pub fn sync_step(&mut self) { for peer in 0..self.peers.len() { - match self.peers[peer].queue.pop_front() { - Some(packet) => { - let mut p = self.peers.get_mut(packet.recipient).unwrap(); - trace!("--- {} -> {} ---", peer, packet.recipient); - p.sync.on_packet(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); - trace!("----------------"); - }, - None => {} + if let Some(packet) = self.peers[peer].queue.pop_front() { + let mut p = self.peers.get_mut(packet.recipient).unwrap(); + trace!("--- {} -> {} ---", peer, packet.recipient); + p.sync.on_packet(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); + trace!("----------------"); } let mut p = self.peers.get_mut(peer).unwrap(); p.sync.maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); diff --git a/src/tests/executive.rs b/src/tests/executive.rs index fe428e199..d201f7fc5 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -168,7 +168,7 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { let mut fail = false; //let mut fail_unless = |cond: bool| if !cond && !fail { failed.push(name.to_string()); fail = true }; let mut fail_unless = |cond: bool, s: &str | if !cond && !fail { - failed.push(format!("[{}] {}: {}", vm, name.to_string(), s)); + failed.push(format!("[{}] {}: {}", vm, name, s)); fail = true }; @@ -245,7 +245,7 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { test.find("post").map(|pre| for (addr, s) in pre.as_object().unwrap() { let address = Address::from(addr.as_ref()); - fail_unless(state.code(&address).unwrap_or(vec![]) == Bytes::from_json(&s["code"]), "code is incorrect"); + fail_unless(state.code(&address).unwrap_or_else(|| vec![]) == Bytes::from_json(&s["code"]), "code is incorrect"); fail_unless(state.balance(&address) == xjson!(&s["balance"]), "balance is incorrect"); fail_unless(state.nonce(&address) == xjson!(&s["nonce"]), "nonce is incorrect"); BTreeMap::from_json(&s["storage"]).iter().foreach(|(k, v)| fail_unless(&state.storage_at(&address, &k) == v, "storage is incorrect")); @@ -266,7 +266,7 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { } - for f in failed.iter() { + for f in &failed { println!("FAILED: 
{:?}", f); } diff --git a/src/tests/state.rs b/src/tests/state.rs index 119e7037a..bebb37c88 100644 --- a/src/tests/state.rs +++ b/src/tests/state.rs @@ -15,7 +15,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { let mut fail = false; { let mut fail_unless = |cond: bool| if !cond && !fail { - failed.push(name.to_string()); + failed.push(name.clone()); flush(format!("FAIL\n")); fail = true; true diff --git a/src/tests/test_common.rs b/src/tests/test_common.rs index 15c5364c4..adb1c35d3 100644 --- a/src/tests/test_common.rs +++ b/src/tests/test_common.rs @@ -6,7 +6,7 @@ macro_rules! declare_test { #[test] #[allow(non_snake_case)] fn $id() { - assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).len() == 0); + assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).is_empty()); } }; } @@ -18,7 +18,7 @@ macro_rules! declare_test_ignore { #[ignore] #[allow(non_snake_case)] fn $id() { - assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).len() == 0); + assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).is_empty()); } }; } diff --git a/src/tests/transaction.rs b/src/tests/transaction.rs index d4836ca84..a798a38f3 100644 --- a/src/tests/transaction.rs +++ b/src/tests/transaction.rs @@ -9,13 +9,13 @@ fn do_json_test(json_data: &[u8]) -> Vec { let ot = RefCell::new(Transaction::new()); for (name, test) in json.as_object().unwrap() { let mut fail = false; - let mut fail_unless = |cond: bool| if !cond && !fail { failed.push(name.to_string()); println!("Transaction: {:?}", ot.borrow()); fail = true }; + let mut fail_unless = |cond: bool| if !cond && !fail { failed.push(name.clone()); println!("Transaction: {:?}", ot.borrow()); fail = true }; let schedule = match test.find("blocknumber") .and_then(|j| j.as_string()) .and_then(|s| BlockNumber::from_str(s).ok()) .unwrap_or(0) { x if x < 900000 => &old_schedule, _ => &new_schedule }; let rlp = Bytes::from_json(&test["rlp"]); - let res = UntrustedRlp::new(&rlp).as_val().map_err(|e| From::from(e)).and_then(|t: Transaction| t.validate(schedule, schedule.have_delegate_call)); + let res = UntrustedRlp::new(&rlp).as_val().map_err(From::from).and_then(|t: Transaction| t.validate(schedule, schedule.have_delegate_call)); fail_unless(test.find("transaction").is_none() == res.is_err()); if let (Some(&Json::Object(ref tx)), Some(&Json::String(ref expect_sender))) = (test.find("transaction"), test.find("sender")) { let t = res.unwrap(); @@ -30,11 +30,11 @@ fn do_json_test(json_data: &[u8]) -> Vec { fail_unless(to == &xjson!(&tx["to"])); } else { *ot.borrow_mut() = t.clone(); - fail_unless(Bytes::from_json(&tx["to"]).len() == 0); + fail_unless(Bytes::from_json(&tx["to"]).is_empty()); } } } - for f in failed.iter() { + for f in &failed { println!("FAILED: {:?}", f); } failed diff --git a/src/verification.rs b/src/verification.rs index 3d852dc3e..064c0b7d7 100644 --- a/src/verification.rs +++ b/src/verification.rs @@ -64,7 +64,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> /// Phase 3 verification. Check block information against parent and uncles. 
pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { // TODO: verify timestamp - let parent = try!(bc.block_header(&header.parent_hash).ok_or::(From::from(BlockError::UnknownParent(header.parent_hash.clone())))); + let parent = try!(bc.block_header(&header.parent_hash).ok_or_else(|| Error::from(BlockError::UnknownParent(header.parent_hash.clone())))); try!(verify_parent(&header, &parent)); try!(engine.verify_block_family(&header, &parent, Some(bytes))); @@ -122,7 +122,7 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, b // cB.p^7 -------------/ // cB.p^8 let mut expected_uncle_parent = header.parent_hash.clone(); - let uncle_parent = try!(bc.block_header(&uncle.parent_hash).ok_or::(From::from(BlockError::UnknownUncleParent(uncle.parent_hash.clone())))); + let uncle_parent = try!(bc.block_header(&uncle.parent_hash).ok_or_else(|| Error::from(BlockError::UnknownUncleParent(uncle.parent_hash.clone())))); for _ in 0..depth { match bc.block_details(&expected_uncle_parent) { Some(details) => { @@ -284,7 +284,7 @@ mod tests { /// Get raw block data fn block(&self, hash: &H256) -> Option { - self.blocks.get(hash).map(|b| b.clone()) + self.blocks.get(hash).cloned() } /// Get the familial details concerning a block. @@ -302,7 +302,7 @@ mod tests { /// Get the hash of given block's number. fn block_hash(&self, index: BlockNumber) -> Option { - self.numbers.get(&index).map(|h| h.clone()) + self.numbers.get(&index).cloned() } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 02fdad17f..32f6eb17c 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -24,6 +24,7 @@ heapsize = "0.2" itertools = "0.4" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } +clippy = "*" [dev-dependencies] json-tests = { path = "json-tests" } diff --git a/util/src/hash.rs b/util/src/hash.rs index 793924f8f..bdd694cfc 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -424,9 +424,9 @@ macro_rules! 
impl_hash { fn from(s: &'_ str) -> $from { use std::str::FromStr; if s.len() % 2 == 1 { - $from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or($from::new()) + $from::from_str(&("0".to_owned() + &(clean_0x(s).to_owned()))[..]).unwrap_or_else(|_| $from::new()) } else { - $from::from_str(clean_0x(s)).unwrap_or($from::new()) + $from::from_str(clean_0x(s)).unwrap_or_else(|_| $from::new()) } } } @@ -545,6 +545,7 @@ mod tests { use std::str::FromStr; #[test] + #[allow(eq_op)] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index ada9c0d2b..477aa6071 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -96,7 +96,7 @@ impl JournalDB { })) { let rlp = Rlp::new(&rlp_data); let to_remove: Vec = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); - for i in to_remove.iter() { + for i in &to_remove { self.forward.remove(i); } try!(self.backing.delete(&last)); diff --git a/util/src/json_aid.rs b/util/src/json_aid.rs index 417017c31..c716b8991 100644 --- a/util/src/json_aid.rs +++ b/util/src/json_aid.rs @@ -10,9 +10,9 @@ pub fn clean(s: &str) -> &str { fn u256_from_str(s: &str) -> U256 { if s.len() >= 2 && &s[0..2] == "0x" { - U256::from_str(&s[2..]).unwrap_or(U256::from(0)) + U256::from_str(&s[2..]).unwrap_or_else(|_| U256::zero()) } else { - U256::from_dec_str(s).unwrap_or(U256::from(0)) + U256::from_dec_str(s).unwrap_or_else(|_| U256::zero()) } } @@ -20,8 +20,8 @@ impl FromJson for Bytes { fn from_json(json: &Json) -> Self { match *json { Json::String(ref s) => match s.len() % 2 { - 0 => FromHex::from_hex(clean(s)).unwrap_or(vec![]), - _ => FromHex::from_hex(&("0".to_owned() + &(clean(s).to_owned()))[..]).unwrap_or(vec![]), + 0 => FromHex::from_hex(clean(s)).unwrap_or_else(|_| vec![]), + _ => FromHex::from_hex(&("0".to_owned() + &(clean(s).to_owned()))[..]).unwrap_or_else(|_| vec![]), }, _ => vec![], } diff --git a/util/src/lib.rs b/util/src/lib.rs index 5f59aca8c..578a1d47a 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,6 +1,8 @@ #![feature(op_assign_traits)] #![feature(augmented_assignments)] #![feature(associated_consts)] +#![feature(plugin)] +#![plugin(clippy)] #![allow(needless_range_loop, match_bool)] //! Ethcore-util library //! 
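Taken together, the build changes in this commit wire clippy in as a nightly compiler plugin (the mechanism that predates the standalone cargo subcommand): `clippy = "*"` is added to both Cargo.toml files, the crate roots enable it with `#![feature(plugin)]` / `#![plugin(clippy)]`, and lints the project chooses to keep are silenced either crate-wide (`needless_range_loop`, `match_bool`) or locally with `#[allow(..)]`, as on `cyclomatic_complexity` and `wrong_self_convention` above. A minimal crate-root sketch of that setup (the function body is only a placeholder so the crate compiles):

#![feature(plugin)]
#![plugin(clippy)]
// keep the lints the project has decided not to fix quiet across the crate
#![allow(needless_range_loop, match_bool)]

pub fn answer() -> u32 {
    42
}
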
diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index 7c2099124..f33cec177 100644 --- a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -15,25 +15,25 @@ fn rlp_at() { assert!(rlp.is_list()); //let animals = as rlp::Decodable>::decode_untrusted(&rlp).unwrap(); let animals: Vec = rlp.as_val().unwrap(); - assert_eq!(animals, vec!["cat".to_string(), "dog".to_string()]); + assert_eq!(animals, vec!["cat".to_owned(), "dog".to_owned()]); let cat = rlp.at(0).unwrap(); assert!(cat.is_data()); assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']); - //assert_eq!(String::decode_untrusted(&cat).unwrap(), "cat".to_string()); - assert_eq!(cat.as_val::().unwrap(), "cat".to_string()); + //assert_eq!(String::decode_untrusted(&cat).unwrap(), "cat".to_owned()); + assert_eq!(cat.as_val::().unwrap(), "cat".to_owned()); let dog = rlp.at(1).unwrap(); assert!(dog.is_data()); assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']); - //assert_eq!(String::decode_untrusted(&dog).unwrap(), "dog".to_string()); - assert_eq!(dog.as_val::().unwrap(), "dog".to_string()); + //assert_eq!(String::decode_untrusted(&dog).unwrap(), "dog".to_owned()); + assert_eq!(dog.as_val::().unwrap(), "dog".to_owned()); let cat_again = rlp.at(0).unwrap(); assert!(cat_again.is_data()); assert_eq!(cat_again.as_raw(), &[0x83, b'c', b'a', b't']); - //assert_eq!(String::decode_untrusted(&cat_again).unwrap(), "cat".to_string()); - assert_eq!(cat_again.as_val::().unwrap(), "cat".to_string()); + //assert_eq!(String::decode_untrusted(&cat_again).unwrap(), "cat".to_owned()); + assert_eq!(cat_again.as_val::().unwrap(), "cat".to_owned()); } } @@ -268,13 +268,13 @@ fn decode_untrusted_u256() { #[test] fn decode_untrusted_str() { - let tests = vec![DTestPair("cat".to_string(), vec![0x83, b'c', b'a', b't']), - DTestPair("dog".to_string(), vec![0x83, b'd', b'o', b'g']), - DTestPair("Marek".to_string(), + let tests = vec![DTestPair("cat".to_owned(), vec![0x83, b'c', b'a', b't']), + DTestPair("dog".to_owned(), vec![0x83, b'd', b'o', b'g']), + DTestPair("Marek".to_owned(), vec![0x85, b'M', b'a', b'r', b'e', b'k']), - DTestPair("".to_string(), vec![0x80]), + DTestPair("".to_owned(), vec![0x80]), DTestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit" - .to_string(), + .to_owned(), vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o', b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', @@ -311,14 +311,14 @@ fn decode_untrusted_vector_u64() { #[test] fn decode_untrusted_vector_str() { - let tests = vec![DTestPair(vec!["cat".to_string(), "dog".to_string()], + let tests = vec![DTestPair(vec!["cat".to_owned(), "dog".to_owned()], vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; run_decode_tests(tests); } #[test] fn decode_untrusted_vector_of_vectors_str() { - let tests = vec![DTestPair(vec![vec!["cat".to_string()]], + let tests = vec![DTestPair(vec![vec!["cat".to_owned()]], vec![0xc5, 0xc4, 0x83, b'c', b'a', b't'])]; run_decode_tests(tests); } diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index bd39f87ae..f52c9bc1e 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -692,7 +692,7 @@ mod tests { } } - fn populate_trie<'db>(db: &'db mut HashDB, root: &'db mut H256, v: &Vec<(Vec, Vec)>) -> TrieDBMut<'db> { + fn populate_trie<'db>(db: &'db mut HashDB, root: &'db mut H256, v: &[(Vec, Vec)]) -> TrieDBMut<'db> { let mut t = TrieDBMut::new(db, root); for i in 0..v.len() { let key: &[u8]= &v[i].0; @@ -702,8 +702,8 @@ mod tests { t } - 
fn unpopulate_trie<'a, 'db>(t: &mut TrieDBMut<'db>, v: &Vec<(Vec, Vec)>) { - for i in &v { + fn unpopulate_trie<'db>(t: &mut TrieDBMut<'db>, v: &[(Vec, Vec)]) { + for i in v { let key: &[u8]= &i.0; t.remove(&key); } @@ -759,7 +759,7 @@ mod tests { println!("TRIE MISMATCH"); println!(""); println!("{:?} vs {:?}", memtrie.root(), real); - for i in x.iter() { + for i in &x { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } println!("{:?}", memtrie); @@ -772,7 +772,7 @@ mod tests { println!(""); println!("remaining: {:?}", memtrie.db_items_remaining()); println!("{:?} vs {:?}", memtrie.root(), real); - for i in x.iter() { + for i in &x { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } println!("{:?}", memtrie); @@ -1049,12 +1049,12 @@ mod tests { println!("TRIE MISMATCH"); println!(""); println!("ORIGINAL... {:?}", memtrie.root()); - for i in x.iter() { + for i in &x { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } println!("{:?}", memtrie); println!("SORTED... {:?}", memtrie_sorted.root()); - for i in y.iter() { + for i in &y { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } println!("{:?}", memtrie_sorted); diff --git a/util/src/uint.rs b/util/src/uint.rs index 84005d366..bd51aad40 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -437,9 +437,9 @@ macro_rules! construct_uint { match *json { Json::String(ref s) => { if s.len() >= 2 && &s[0..2] == "0x" { - FromStr::from_str(&s[2..]).unwrap_or(Default::default()) + FromStr::from_str(&s[2..]).unwrap_or_else(|_| Default::default()) } else { - Uint::from_dec_str(s).unwrap_or(Default::default()) + Uint::from_dec_str(s).unwrap_or_else(|_| Default::default()) } }, Json::U64(u) => From::from(u), @@ -1046,6 +1046,7 @@ mod tests { } #[test] + #[allow(eq_op)] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); From 2a5a333fc0569b17aeaa6d5f63f2bd087d599b55 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 14:15:13 +0100 Subject: [PATCH 006/138] Explaining why we use * in version --- Cargo.toml | 2 +- util/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a01bfd594..58a63a37c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" -clippy = "*" +clippy = "*" # Always newest, since we use nightly [features] jit = ["evmjit"] diff --git a/util/Cargo.toml b/util/Cargo.toml index 32f6eb17c..38ccfe9f8 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -24,7 +24,7 @@ heapsize = "0.2" itertools = "0.4" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } -clippy = "*" +clippy = "*" # Always newest, since we use nightly [dev-dependencies] json-tests = { path = "json-tests" } From 82373ab7a412e45eea373f52d3035e4ffcdbe860 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 20 Jan 2016 04:19:38 +0100 Subject: [PATCH 007/138] inital commit with eth_blockNumber working --- Cargo.toml | 3 +++ src/bin/client/ethrpc.rs | 53 ++++++++++++++++++++++++++++++++++++++++ src/bin/client/main.rs | 15 ++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 src/bin/client/ethrpc.rs diff --git a/Cargo.toml b/Cargo.toml index 04c4bf956..32be356c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,10 +20,13 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" +jsonrpc-core = { version = "1.0", 
optional = true } +jsonrpc-http-server = { version = "1.0", optional = true } [features] jit = ["evmjit"] evm_debug = [] +rpc = ["jsonrpc-core", "jsonrpc-http-server"] [[bin]] name = "client" diff --git a/src/bin/client/ethrpc.rs b/src/bin/client/ethrpc.rs new file mode 100644 index 000000000..d46a91559 --- /dev/null +++ b/src/bin/client/ethrpc.rs @@ -0,0 +1,53 @@ +extern crate jsonrpc_core; +extern crate jsonrpc_http_server; + +use std::sync::{Arc, RwLock}; +use self::jsonrpc_core::{IoHandler, IoDelegate, Params, Value, Error, ErrorCode}; +use ethcore::client::*; + +struct Eth { + client: Arc> +} + +impl Eth { + fn new(client: Arc>) -> Self { + Eth { + client: client + } + } + + fn block_number(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), + _ => Err(Error::new(ErrorCode::InvalidParams)), + } + } +} + +struct EthRpc; + +impl EthRpc { + fn build_handler(client: Arc>) -> IoHandler { + let mut handler = IoHandler::new(); + let mut eth = IoDelegate::new(Arc::new(Eth::new(client))); + eth.add_method("eth_blockNumber", Eth::block_number); + handler.add_delegate(eth); + handler + } +} + +pub struct HttpServer { + server: jsonrpc_http_server::Server +} + +impl HttpServer { + pub fn new(client: Arc>, threads: usize) -> HttpServer { + HttpServer { + server: jsonrpc_http_server::Server::new(EthRpc::build_handler(client), threads) + } + } + + pub fn start_async(self, addr: &str) { + self.server.start_async(addr) + } +} diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 3335d8a72..bf1b82909 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -4,6 +4,9 @@ extern crate rustc_serialize; extern crate log; extern crate env_logger; +#[cfg(feature = "rpc")] +mod ethrpc; + use std::io::stdin; use std::env; use log::{LogLevelFilter}; @@ -26,10 +29,22 @@ fn setup_log() { builder.init().unwrap(); } + +#[cfg(feature = "rpc")] +fn setup_rpc_server(client: Arc>) { + let server = ethrpc::HttpServer::new(client, 1); + server.start_async("127.0.0.1:3030"); +} + +#[cfg(not(feature = "rpc"))] +fn setup_rpc_server(_client: Arc>) { +} + fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); + setup_rpc_server(service.client()); let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0, info: Default::default() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); loop { From a8e4912551f1df3877189800d91d8445d17526ef Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 20 Jan 2016 15:49:49 +0100 Subject: [PATCH 008/138] cleanup ethrpc --- src/bin/client/ethrpc.rs | 75 +++++++++++++++++++++++++++++----------- src/bin/client/main.rs | 3 +- 2 files changed, 57 insertions(+), 21 deletions(-) diff --git a/src/bin/client/ethrpc.rs b/src/bin/client/ethrpc.rs index d46a91559..735250d87 100644 --- a/src/bin/client/ethrpc.rs +++ b/src/bin/client/ethrpc.rs @@ -5,17 +5,56 @@ use std::sync::{Arc, RwLock}; use self::jsonrpc_core::{IoHandler, IoDelegate, Params, Value, Error, ErrorCode}; use ethcore::client::*; -struct Eth { - client: Arc> +macro_rules! rpcerr { + () => (Err(Error::new(ErrorCode::InternalError))) } -impl Eth { - fn new(client: Arc>) -> Self { - Eth { +/// This could be a part of `jsonrpc_core`. Unfortunately, +/// "only traits defined in the current crate can be implemented for a type parameter". 
+pub trait IntoDelegate where T: Send + Sync + 'static { + /// This function should be called to translate custom type into IoDelegate + fn into_delegate(self) -> IoDelegate; +} + +/// eth rpc interface +pub trait Eth { + /// returns protocol version + fn protocol_version(&self, _: Params) -> Result { rpcerr!() } + + /// returns block author + fn author(&self, _: Params) -> Result { rpcerr!() } + + /// returns current gas_price + fn gas_price(&self, _: Params) -> Result { rpcerr!() } + + /// returns highest block number + fn block_number(&self, _: Params) -> Result { rpcerr!() } +} + +impl IntoDelegate for D where D: Eth + Send + Sync + 'static { + fn into_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("eth_protocolVersion", D::protocol_version); + delegate.add_method("eth_coinbase", D::author); + delegate.add_method("eth_gasPrice", D::gas_price); + delegate.add_method("eth_blockNumber", D::block_number); + delegate + } +} + +pub struct EthClient { + client: Arc>, +} + +impl EthClient { + pub fn new(client: Arc>) -> Self { + EthClient { client: client } } +} +impl Eth for EthClient { fn block_number(&self, params: Params) -> Result { match params { Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), @@ -24,30 +63,26 @@ impl Eth { } } -struct EthRpc; - -impl EthRpc { - fn build_handler(client: Arc>) -> IoHandler { - let mut handler = IoHandler::new(); - let mut eth = IoDelegate::new(Arc::new(Eth::new(client))); - eth.add_method("eth_blockNumber", Eth::block_number); - handler.add_delegate(eth); - handler - } -} pub struct HttpServer { - server: jsonrpc_http_server::Server + handler: IoHandler, + threads: usize } impl HttpServer { - pub fn new(client: Arc>, threads: usize) -> HttpServer { + pub fn new(threads: usize) -> HttpServer { HttpServer { - server: jsonrpc_http_server::Server::new(EthRpc::build_handler(client), threads) + handler: IoHandler::new(), + threads: threads } } + pub fn add_delegate(&mut self, delegate: I) where D: Send + Sync + 'static, I: IntoDelegate { + self.handler.add_delegate(delegate.into_delegate()); + } + pub fn start_async(self, addr: &str) { - self.server.start_async(addr) + let server = jsonrpc_http_server::Server::new(self.handler, self.threads); + server.start_async(addr) } } diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index bf1b82909..3dd9b9c5c 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -32,7 +32,8 @@ fn setup_log() { #[cfg(feature = "rpc")] fn setup_rpc_server(client: Arc>) { - let server = ethrpc::HttpServer::new(client, 1); + let mut server = ethrpc::HttpServer::new(1); + server.add_delegate(ethrpc::EthClient::new(client)); server.start_async("127.0.0.1:3030"); } From c0a923a2716fe97c9e41f34b77987dc0bd719d87 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 20 Jan 2016 15:55:29 +0100 Subject: [PATCH 009/138] basic .travis.yml --- .travis.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..a5b361fc4 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: rust + +rust: + - nightly + +os: + - osx + +before_script: + - brew update + - brew install rocksdb From a3ced5140ce07f90e3f4ef0d191d66669e891a03 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 20 Jan 2016 18:43:29 +0300 Subject: [PATCH 010/138] cache directories --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml 
b/.travis.yml index a5b361fc4..1efef4f21 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,3 +9,8 @@ os: before_script: - brew update - brew install rocksdb + +cache: + directories: + - $TRAVIS_BUILD_DIR/target + - $HOME/.cargo \ No newline at end of file From aa0760597b1d33615731aa06192595dd3fcf09a7 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 16:52:22 +0100 Subject: [PATCH 011/138] Fixing delegatecall --- src/action_params.rs | 11 ++++++---- src/evm/ext.rs | 14 +++++++++++- src/evm/interpreter.rs | 49 +++++++++++++++++++++++++++++++++++------- src/evm/tests.rs | 41 +++++++++++++++++++++-------------- src/executive.rs | 25 +++++++++++++-------- src/externalities.rs | 32 +++++++++++++++++++++++++++ src/tests/executive.rs | 18 +++++++++++++++- 7 files changed, 151 insertions(+), 39 deletions(-) diff --git a/src/action_params.rs b/src/action_params.rs index da1ae0ce0..6f876874b 100644 --- a/src/action_params.rs +++ b/src/action_params.rs @@ -23,15 +23,17 @@ pub struct ActionParams { pub gas_price: U256, /// Transaction value. pub value: U256, + /// Should transfer value from sender to origin + pub is_value_transfer: bool, /// Code being executed. pub code: Option, /// Input data. pub data: Option } -impl ActionParams { - /// TODO [Gav Wood] Please document me - pub fn new() -> ActionParams { +impl Default for ActionParams { + /// Returns default ActionParams initialized with zeros + fn default() -> ActionParams { ActionParams { code_address: Address::new(), address: Address::new(), @@ -41,7 +43,8 @@ impl ActionParams { gas_price: U256::zero(), value: U256::zero(), code: None, - data: None + data: None, + is_value_transfer: true } } } diff --git a/src/evm/ext.rs b/src/evm/ext.rs index 4d2471593..b7bf609ca 100644 --- a/src/evm/ext.rs +++ b/src/evm/ext.rs @@ -26,7 +26,7 @@ pub enum MessageCallResult { Failed } -/// TODO [debris] Please document me +/// Externalities interface for EVMs pub trait Ext { /// Returns a value for given key. fn storage_at(&self, key: &H256) -> H256; @@ -61,6 +61,18 @@ pub trait Ext { code_address: &Address, output: &mut [u8]) -> MessageCallResult; + /// Delegate Message call. + /// + /// Returns Err, if we run out of gas. + /// Otherwise returns call_result which contains gas left + /// and true if subcall was successfull. 
+ fn delegatecall(&mut self, + gas: &U256, + value: &U256, + data: &[u8], + code_address: &Address, + output: &mut [u8]) -> MessageCallResult; + /// Returns code at given address fn extcode(&self, address: &Address) -> Bytes; diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 88823cbea..c7d2bfda8 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -564,17 +564,50 @@ impl Interpreter { } }; }, - instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL => { + instructions::DELEGATECALL => { + let call_gas = stack.pop_back(); + let code_address = stack.pop_back(); + let code_address = u256_to_address(&code_address); + + let value = params.value; + + let in_off = stack.pop_back(); + let in_size = stack.pop_back(); + let out_off = stack.pop_back(); + let out_size = stack.pop_back(); + + let can_call = ext.depth() < ext.schedule().max_depth; + if !can_call { + stack.push(U256::zero()); + return Ok(InstructionResult::UnusedGas(call_gas)); + } + + let call_result = { + // we need to write and read from memory in the same time + // and we don't want to copy + let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; + let output = mem.writeable_slice(out_off, out_size); + ext.delegatecall(&call_gas, &value, input, &code_address, output) + }; + + return match call_result { + MessageCallResult::Success(gas_left) => { + stack.push(U256::one()); + Ok(InstructionResult::UnusedGas(gas_left)) + }, + MessageCallResult::Failed => { + stack.push(U256::zero()); + Ok(InstructionResult::Ok) + } + }; + }, + instructions::CALL | instructions::CALLCODE => { assert!(ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, "overflow possible"); let call_gas = stack.pop_back(); let code_address = stack.pop_back(); let code_address = u256_to_address(&code_address); - let is_delegatecall = instruction == instructions::DELEGATECALL; - let value = match is_delegatecall { - true => params.value, - false => stack.pop_back() - }; + let value = stack.pop_back(); let address = match instruction == instructions::CALL { true => &code_address, @@ -586,12 +619,12 @@ impl Interpreter { let out_off = stack.pop_back(); let out_size = stack.pop_back(); - let call_gas = call_gas + match !is_delegatecall && value > U256::zero() { + let call_gas = call_gas + match value > U256::zero() { true => U256::from(ext.schedule().call_stipend), false => U256::zero() }; - let can_call = (is_delegatecall || ext.balance(¶ms.address) >= value) && ext.depth() < ext.schedule().max_depth; + let can_call = ext.balance(¶ms.address) >= value && ext.depth() < ext.schedule().max_depth; if !can_call { stack.push(U256::zero()); diff --git a/src/evm/tests.rs b/src/evm/tests.rs index 8e1b5eff4..7eb9d484c 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -69,6 +69,15 @@ impl Ext for FakeExt { unimplemented!(); } + fn delegatecall(&mut self, + _gas: &U256, + _value: &U256, + _data: &[u8], + _address: &Address, + _output: &mut [u8]) -> MessageCallResult { + unimplemented!(); + } + fn extcode(&self, address: &Address) -> Bytes { self.codes.get(address).unwrap_or(&Bytes::new()).clone() } @@ -110,7 +119,7 @@ fn test_stack_underflow() { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "01600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -137,7 +146,7 @@ fn test_add(factory: 
super::Factory) { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -157,7 +166,7 @@ fn test_sha3(factory: super::Factory) { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "6000600020600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -177,7 +186,7 @@ fn test_address(factory: super::Factory) { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "30600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -198,7 +207,7 @@ fn test_origin(factory: super::Factory) { let origin = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); let code = "32600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.origin = origin.clone(); params.gas = U256::from(100_000); @@ -220,7 +229,7 @@ fn test_sender(factory: super::Factory) { let sender = Address::from_str("cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); let code = "33600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); @@ -254,7 +263,7 @@ fn test_extcodecopy(factory: super::Factory) { let code = "333b60006000333c600051600055".from_hex().unwrap(); let sender_code = "6005600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); @@ -276,7 +285,7 @@ fn test_log_empty(factory: super::Factory) { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "60006000a0".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -307,7 +316,7 @@ fn test_log_sender(factory: super::Factory) { let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let code = "60ff6000533360206000a1".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); @@ -332,7 +341,7 @@ fn test_blockhash(factory: super::Factory) { let code = "600040600055".from_hex().unwrap(); let blockhash = H256::from_str("123400000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681").unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -354,7 +363,7 @@ fn test_calldataload(factory: super::Factory) { let 
code = "600135600055".from_hex().unwrap(); let data = "0123ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff23".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code); @@ -376,7 +385,7 @@ fn test_author(factory: super::Factory) { let author = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let code = "41600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.gas = U256::from(100_000); params.code = Some(code); let mut ext = FakeExt::new(); @@ -396,7 +405,7 @@ fn test_timestamp(factory: super::Factory) { let timestamp = 0x1234; let code = "42600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.gas = U256::from(100_000); params.code = Some(code); let mut ext = FakeExt::new(); @@ -416,7 +425,7 @@ fn test_number(factory: super::Factory) { let number = 0x1234; let code = "43600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.gas = U256::from(100_000); params.code = Some(code); let mut ext = FakeExt::new(); @@ -436,7 +445,7 @@ fn test_difficulty(factory: super::Factory) { let difficulty = U256::from(0x1234); let code = "44600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.gas = U256::from(100_000); params.code = Some(code); let mut ext = FakeExt::new(); @@ -456,7 +465,7 @@ fn test_gas_limit(factory: super::Factory) { let gas_limit = U256::from(0x1234); let code = "45600055".from_hex().unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.gas = U256::from(100_000); params.code = Some(code); let mut ext = FakeExt::new(); diff --git a/src/executive.rs b/src/executive.rs index 6c2b29e3f..ea1a8f6de 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -134,6 +134,7 @@ impl<'a> Executive<'a> { gas: init_gas, gas_price: t.gas_price, value: t.value, + is_value_transfer: true, code: Some(t.data.clone()), data: None, }; @@ -148,6 +149,7 @@ impl<'a> Executive<'a> { gas: init_gas, gas_price: t.gas_price, value: t.value, + is_value_transfer: true, code: self.state.code(address), data: Some(t.data.clone()), }; @@ -166,11 +168,14 @@ impl<'a> Executive<'a> { /// Modifies the substate and the output. /// Returns either gas_left or `evm::Error`. pub fn call(&mut self, params: ActionParams, substate: &mut Substate, mut output: BytesRef) -> evm::Result { + println!("Calling executive. 
Sender: {}", params.sender); // backup used in case of running out of gas let backup = self.state.clone(); // at first, transfer value to destination - self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + if params.is_value_transfer { + self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + } trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); if self.engine.is_builtin(¶ms.code_address) { @@ -227,7 +232,9 @@ impl<'a> Executive<'a> { self.state.new_contract(¶ms.address); // then transfer value to it - self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + if params.is_value_transfer { + self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + } let res = { let mut ext = self.to_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::InitContract); @@ -367,7 +374,7 @@ mod tests { fn test_sender_balance(factory: Factory) { let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let address = contract_address(&sender, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); @@ -424,7 +431,7 @@ mod tests { let address = contract_address(&sender, &U256::zero()); // TODO: add tests for 'callcreate' //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.origin = sender.clone(); @@ -477,7 +484,7 @@ mod tests { let address = contract_address(&sender, &U256::zero()); // TODO: add tests for 'callcreate' //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.origin = sender.clone(); @@ -528,7 +535,7 @@ mod tests { let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let address = contract_address(&sender, &U256::zero()); let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.sender = sender.clone(); params.origin = sender.clone(); @@ -584,7 +591,7 @@ mod tests { let address_b = Address::from_str("945304eb96065b2a98b57a48a06ae28d285a71b5" ).unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address_a.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); @@ -633,7 +640,7 @@ mod tests { let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let code = "600160005401600055600060006000600060003060e05a03f1600155".from_hex().unwrap(); let address = contract_address(&sender, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); params.code = Some(code.clone()); @@ -789,7 +796,7 @@ mod tests { let address = contract_address(&sender, &U256::zero()); // TODO: add tests for 'callcreate' //let next_address = contract_address(&address, &U256::zero()); - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); params.address = 
address.clone(); params.sender = sender.clone(); params.origin = sender.clone(); diff --git a/src/externalities.rs b/src/externalities.rs index 8b16cc72b..110b965d7 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -17,6 +17,7 @@ pub enum OutputPolicy<'a> { /// Transaction properties that externalities need to know about. pub struct OriginInfo { + sender: Address, address: Address, origin: Address, gas_price: U256 @@ -26,6 +27,7 @@ impl OriginInfo { /// Populates origin info from action params. pub fn from(params: &ActionParams) -> Self { OriginInfo { + sender: params.sender.clone(), address: params.address.clone(), origin: params.origin.clone(), gas_price: params.gas_price.clone() @@ -112,6 +114,7 @@ impl<'a> Ext for Externalities<'a> { gas: *gas, gas_price: self.origin_info.gas_price.clone(), value: value.clone(), + is_value_transfer: true, code: Some(code.to_vec()), data: None, }; @@ -129,6 +132,34 @@ impl<'a> Ext for Externalities<'a> { } } + fn delegatecall(&mut self, + gas: &U256, + value: &U256, + data: &[u8], + code_address: &Address, + output: &mut [u8]) -> MessageCallResult { + + let params = ActionParams { + code_address: code_address.clone(), + address: self.origin_info.address.clone(), + sender: self.origin_info.sender.clone(), + origin: self.origin_info.origin.clone(), + gas: *gas, + gas_price: self.origin_info.gas_price.clone(), + value: value.clone(), + is_value_transfer: false, + code: self.state.code(code_address), + data: Some(data.to_vec()), + }; + + let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth); + + match ex.call(params, self.substate, BytesRef::Fixed(output)) { + Ok(gas_left) => MessageCallResult::Success(gas_left), + _ => MessageCallResult::Failed + } + } + fn call(&mut self, gas: &U256, address: &Address, @@ -145,6 +176,7 @@ impl<'a> Ext for Externalities<'a> { gas: *gas, gas_price: self.origin_info.gas_price.clone(), value: value.clone(), + is_value_transfer: true, code: self.state.code(code_address), data: Some(data.to_vec()), }; diff --git a/src/tests/executive.rs b/src/tests/executive.rs index fe428e199..ca18850c4 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -115,6 +115,22 @@ impl<'a> Ext for TestExt<'a> { MessageCallResult::Success(*gas) } + fn delegatecall(&mut self, + gas: &U256, + value: &U256, + data: &[u8], + _code_address: &Address, + _output: &mut [u8]) -> MessageCallResult { + + self.callcreates.push(CallCreate { + data: data.to_vec(), + destination: None, + gas_limit: *gas, + value: *value + }); + MessageCallResult::Success(*gas) + } + fn extcode(&self, address: &Address) -> Bytes { self.ext.extcode(address) } @@ -200,7 +216,7 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { let engine = TestEngine::new(1, vm.clone()); // params - let mut params = ActionParams::new(); + let mut params = ActionParams::default(); test.find("exec").map(|exec| { params.address = xjson!(&exec["address"]); params.sender = xjson!(&exec["caller"]); From 8084e1b6d7288db1c6e6f91d8f1ffdb138217ab2 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 17:01:58 +0100 Subject: [PATCH 012/138] Removing value from delegatecall function --- src/evm/ext.rs | 1 - src/evm/interpreter.rs | 4 +--- src/evm/tests.rs | 1 - src/externalities.rs | 5 +++-- src/tests/executive.rs | 3 +-- 5 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/evm/ext.rs b/src/evm/ext.rs index b7bf609ca..7e2f0f47f 100644 --- a/src/evm/ext.rs +++ b/src/evm/ext.rs @@ -68,7 +68,6 @@ pub trait Ext { /// and 
true if subcall was successfull. fn delegatecall(&mut self, gas: &U256, - value: &U256, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult; diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index c7d2bfda8..a5e8efc91 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -569,8 +569,6 @@ impl Interpreter { let code_address = stack.pop_back(); let code_address = u256_to_address(&code_address); - let value = params.value; - let in_off = stack.pop_back(); let in_size = stack.pop_back(); let out_off = stack.pop_back(); @@ -587,7 +585,7 @@ impl Interpreter { // and we don't want to copy let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; let output = mem.writeable_slice(out_off, out_size); - ext.delegatecall(&call_gas, &value, input, &code_address, output) + ext.delegatecall(&call_gas, input, &code_address, output) }; return match call_result { diff --git a/src/evm/tests.rs b/src/evm/tests.rs index 7eb9d484c..83c58bbf8 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -71,7 +71,6 @@ impl Ext for FakeExt { fn delegatecall(&mut self, _gas: &U256, - _value: &U256, _data: &[u8], _address: &Address, _output: &mut [u8]) -> MessageCallResult { diff --git a/src/externalities.rs b/src/externalities.rs index 110b965d7..20326fe03 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -18,6 +18,7 @@ pub enum OutputPolicy<'a> { /// Transaction properties that externalities need to know about. pub struct OriginInfo { sender: Address, + value: U256, address: Address, origin: Address, gas_price: U256 @@ -28,6 +29,7 @@ impl OriginInfo { pub fn from(params: &ActionParams) -> Self { OriginInfo { sender: params.sender.clone(), + value: params.value.clone(), address: params.address.clone(), origin: params.origin.clone(), gas_price: params.gas_price.clone() @@ -134,7 +136,6 @@ impl<'a> Ext for Externalities<'a> { fn delegatecall(&mut self, gas: &U256, - value: &U256, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult { @@ -146,7 +147,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: value.clone(), + value: self.origin_info.value.clone(), is_value_transfer: false, code: self.state.code(code_address), data: Some(data.to_vec()), diff --git a/src/tests/executive.rs b/src/tests/executive.rs index ca18850c4..17cfb7df5 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -117,7 +117,6 @@ impl<'a> Ext for TestExt<'a> { fn delegatecall(&mut self, gas: &U256, - value: &U256, data: &[u8], _code_address: &Address, _output: &mut [u8]) -> MessageCallResult { @@ -126,7 +125,7 @@ impl<'a> Ext for TestExt<'a> { data: data.to_vec(), destination: None, gas_limit: *gas, - value: *value + value: U256::zero() }); MessageCallResult::Success(*gas) } From cd9a0e4e588215b6e2839c9e8475b0ddecab3c38 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 17:27:33 +0100 Subject: [PATCH 013/138] Changing is_value_transfer to enum --- src/action_params.rs | 19 ++++++++++++------- src/evm/interpreter.rs | 5 ++++- src/executive.rs | 14 ++++++-------- src/externalities.rs | 16 ++++++++-------- 4 files changed, 30 insertions(+), 24 deletions(-) diff --git a/src/action_params.rs b/src/action_params.rs index 6f876874b..a56e64f4d 100644 --- a/src/action_params.rs +++ b/src/action_params.rs @@ -3,8 +3,16 @@ use util::hash::*; use util::uint::*; use util::bytes::*; -// TODO: should be a trait, possible to avoid cloning 
everything from a Transaction(/View). +/// Transaction value +#[derive(Clone, Debug)] +pub enum ActionValue { + /// Value that should be transfered + Transfer(U256), + /// Apparent value for transaction (not transfered) + Apparent(U256) +} +// TODO: should be a trait, possible to avoid cloning everything from a Transaction(/View). /// Action (call/create) input params. Everything else should be specified in Externalities. #[derive(Clone, Debug)] pub struct ActionParams { @@ -22,9 +30,7 @@ pub struct ActionParams { /// Gas price. pub gas_price: U256, /// Transaction value. - pub value: U256, - /// Should transfer value from sender to origin - pub is_value_transfer: bool, + pub value: ActionValue, /// Code being executed. pub code: Option, /// Input data. @@ -41,10 +47,9 @@ impl Default for ActionParams { origin: Address::new(), gas: U256::zero(), gas_price: U256::zero(), - value: U256::zero(), + value: ActionValue::Transfer(U256::zero()), code: None, - data: None, - is_value_transfer: true + data: None } } } diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index a5e8efc91..52509a2c0 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -741,7 +741,10 @@ impl Interpreter { stack.push(address_to_u256(params.sender.clone())); }, instructions::CALLVALUE => { - stack.push(params.value.clone()); + stack.push(match params.value { + ActionValue::Transfer(val) => val, + ActionValue::Apparent(val) => val, + }); }, instructions::CALLDATALOAD => { let big_id = stack.pop_back(); diff --git a/src/executive.rs b/src/executive.rs index ea1a8f6de..dd61a5b97 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -133,8 +133,7 @@ impl<'a> Executive<'a> { origin: sender.clone(), gas: init_gas, gas_price: t.gas_price, - value: t.value, - is_value_transfer: true, + value: ActionValue::Transfer(t.value), code: Some(t.data.clone()), data: None, }; @@ -148,8 +147,7 @@ impl<'a> Executive<'a> { origin: sender.clone(), gas: init_gas, gas_price: t.gas_price, - value: t.value, - is_value_transfer: true, + value: ActionValue::Transfer(t.value), code: self.state.code(address), data: Some(t.data.clone()), }; @@ -173,8 +171,8 @@ impl<'a> Executive<'a> { let backup = self.state.clone(); // at first, transfer value to destination - if params.is_value_transfer { - self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + if let ActionValue::Transfer(val) = params.value { + self.state.transfer_balance(¶ms.sender, ¶ms.address, &val); } trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); @@ -232,8 +230,8 @@ impl<'a> Executive<'a> { self.state.new_contract(¶ms.address); // then transfer value to it - if params.is_value_transfer { - self.state.transfer_balance(¶ms.sender, ¶ms.address, ¶ms.value); + if let ActionValue::Transfer(val) = params.value { + self.state.transfer_balance(¶ms.sender, ¶ms.address, &val); } let res = { diff --git a/src/externalities.rs b/src/externalities.rs index 20326fe03..a9a14c5c2 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -29,10 +29,13 @@ impl OriginInfo { pub fn from(params: &ActionParams) -> Self { OriginInfo { sender: params.sender.clone(), - value: params.value.clone(), address: params.address.clone(), origin: params.origin.clone(), - gas_price: params.gas_price.clone() + gas_price: params.gas_price.clone(), + value: match params.value { + ActionValue::Transfer(val) => val, + ActionValue::Apparent(val) => val, + } } } } @@ -115,8 +118,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: 
*gas, gas_price: self.origin_info.gas_price.clone(), - value: value.clone(), - is_value_transfer: true, + value: ActionValue::Transfer(value.clone()), code: Some(code.to_vec()), data: None, }; @@ -147,8 +149,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: self.origin_info.value.clone(), - is_value_transfer: false, + value: ActionValue::Apparent(self.origin_info.value.clone()), code: self.state.code(code_address), data: Some(data.to_vec()), }; @@ -176,8 +177,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: value.clone(), - is_value_transfer: true, + value: ActionValue::Transfer(value.clone()), code: self.state.code(code_address), data: Some(data.to_vec()), }; From 651d2d66e04240862df7b00ff0bd765c9a354ac6 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 17:31:37 +0100 Subject: [PATCH 014/138] Fixing tests --- src/executive.rs | 12 ++++++------ src/tests/executive.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/executive.rs b/src/executive.rs index dd61a5b97..f5fa9cbc7 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -377,7 +377,7 @@ mod tests { params.sender = sender.clone(); params.gas = U256::from(100_000); params.code = Some("3331600055".from_hex().unwrap()); - params.value = U256::from(0x7); + params.value = ActionValue::Transfer(U256::from(0x7)); let mut state = State::new_temp(); state.add_balance(&sender, &U256::from(0x100u64)); let info = EnvInfo::new(); @@ -435,7 +435,7 @@ mod tests { params.origin = sender.clone(); params.gas = U256::from(100_000); params.code = Some(code.clone()); - params.value = U256::from(100); + params.value = ActionValue::Transfer(U256::from(100)); let mut state = State::new_temp(); state.add_balance(&sender, &U256::from(100)); let info = EnvInfo::new(); @@ -488,7 +488,7 @@ mod tests { params.origin = sender.clone(); params.gas = U256::from(100_000); params.code = Some(code.clone()); - params.value = U256::from(100); + params.value = ActionValue::Transfer(U256::from(100)); let mut state = State::new_temp(); state.add_balance(&sender, &U256::from(100)); let info = EnvInfo::new(); @@ -539,7 +539,7 @@ mod tests { params.origin = sender.clone(); params.gas = U256::from(100_000); params.code = Some(code.clone()); - params.value = U256::from(100); + params.value = ActionValue::Transfer(U256::from(100)); let mut state = State::new_temp(); state.add_balance(&sender, &U256::from(100)); let info = EnvInfo::new(); @@ -594,7 +594,7 @@ mod tests { params.sender = sender.clone(); params.gas = U256::from(100_000); params.code = Some(code_a.clone()); - params.value = U256::from(100_000); + params.value = ActionValue::Transfer(U256::from(100_000)); let mut state = State::new_temp(); state.init_code(&address_a, code_a.clone()); @@ -800,7 +800,7 @@ mod tests { params.origin = sender.clone(); params.gas = U256::from(0x0186a0); params.code = Some(code.clone()); - params.value = U256::from_str("0de0b6b3a7640000").unwrap(); + params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); let mut state = State::new_temp(); state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap()); let info = EnvInfo::new(); diff --git a/src/tests/executive.rs b/src/tests/executive.rs index 17cfb7df5..bc88a4697 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -224,7 +224,7 @@ fn 
do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { params.data = xjson!(&exec["data"]); params.gas = xjson!(&exec["gas"]); params.gas_price = xjson!(&exec["gasPrice"]); - params.value = xjson!(&exec["value"]); + params.value = ActionValue::Transfer(xjson!(&exec["value"])); }); let out_of_gas = test.find("callcreates").map(|_calls| { From 9048a608dad37a993b7c5e19ebc007e5468230c8 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 17:57:53 +0100 Subject: [PATCH 015/138] Populating last_hashes --- src/env_info.rs | 16 +++++++--------- src/header.rs | 2 +- src/tests/executive.rs | 14 ++++---------- 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/src/env_info.rs b/src/env_info.rs index 1246234ff..76d33cd5a 100644 --- a/src/env_info.rs +++ b/src/env_info.rs @@ -25,8 +25,14 @@ pub struct EnvInfo { } impl EnvInfo { - /// TODO [debris] Please document me + /// Create empty env_info initialized with zeros pub fn new() -> EnvInfo { + EnvInfo::default() + } +} + +impl Default for EnvInfo { + fn default() -> Self { EnvInfo { number: 0, author: Address::new(), @@ -53,11 +59,3 @@ impl FromJson for EnvInfo { } } } - -/// TODO: it should be the other way around. -/// `new` should call `default`. -impl Default for EnvInfo { - fn default() -> Self { - EnvInfo::new() - } -} diff --git a/src/header.rs b/src/header.rs index 28ed458fb..08da1d855 100644 --- a/src/header.rs +++ b/src/header.rs @@ -2,7 +2,7 @@ use util::*; use basic_types::*; use time::now_utc; -/// TODO [Gav Wood] Please document me +/// Current number of block type pub type BlockNumber = u64; /// A block header. diff --git a/src/tests/executive.rs b/src/tests/executive.rs index fe428e199..9abd31021 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -187,15 +187,9 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { BTreeMap::from_json(&s["storage"]).into_iter().foreach(|(k, v)| state.set_storage(&address, k, v)); }); - let mut info = EnvInfo::new(); - - test.find("env").map(|env| { - info.author = xjson!(&env["currentCoinbase"]); - info.difficulty = xjson!(&env["currentDifficulty"]); - info.gas_limit = xjson!(&env["currentGasLimit"]); - info.number = xjson!(&env["currentNumber"]); - info.timestamp = xjson!(&env["currentTimestamp"]); - }); + let info = test.find("env").map(|env| { + EnvInfo::from_json(env) + }).unwrap_or_default(); let engine = TestEngine::new(1, vm.clone()); @@ -277,7 +271,7 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { declare_test!{ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"} declare_test!{ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperationTest"} // this one crashes with some vm internal error. Separately they pass. -declare_test_ignore!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} +declare_test!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} declare_test!{ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfoTest"} declare_test!{ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperationsTest"} // this one take way too long. 
From 78b279a73481f32dfbe482521141ed678d63e8f0 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 20:44:26 +0100 Subject: [PATCH 016/138] Fixing docs for BlockNumber --- src/header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/header.rs b/src/header.rs index 08da1d855..523cf1b47 100644 --- a/src/header.rs +++ b/src/header.rs @@ -2,7 +2,7 @@ use util::*; use basic_types::*; use time::now_utc; -/// Current number of block type +/// Type for BlockNumber pub type BlockNumber = u64; /// A block header. From 80c7dee164d34c961d0831f38cc5c24be0d1d211 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 20 Jan 2016 20:45:31 +0100 Subject: [PATCH 017/138] Fixing typo --- src/header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/header.rs b/src/header.rs index 523cf1b47..556795b40 100644 --- a/src/header.rs +++ b/src/header.rs @@ -2,7 +2,7 @@ use util::*; use basic_types::*; use time::now_utc; -/// Type for BlockNumber +/// Type for Block number pub type BlockNumber = u64; /// A block header. From 013ac2cf9a4b1ccb0c49610590520f6871e3963a Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 21 Jan 2016 00:54:19 +0100 Subject: [PATCH 018/138] rpc api in progress --- src/bin/client/ethrpc.rs | 119 +++++++++++++++++++++++++++++++-------- src/bin/client/main.rs | 7 ++- 2 files changed, 102 insertions(+), 24 deletions(-) diff --git a/src/bin/client/ethrpc.rs b/src/bin/client/ethrpc.rs index 735250d87..09c93041f 100644 --- a/src/bin/client/ethrpc.rs +++ b/src/bin/client/ethrpc.rs @@ -2,46 +2,101 @@ extern crate jsonrpc_core; extern crate jsonrpc_http_server; use std::sync::{Arc, RwLock}; +use rustc_serialize::hex::ToHex; use self::jsonrpc_core::{IoHandler, IoDelegate, Params, Value, Error, ErrorCode}; use ethcore::client::*; +use util::hash::*; macro_rules! rpcerr { - () => (Err(Error::new(ErrorCode::InternalError))) + () => (Err(Error::internal_error())) } -/// This could be a part of `jsonrpc_core`. Unfortunately, -/// "only traits defined in the current crate can be implemented for a type parameter". -pub trait IntoDelegate where T: Send + Sync + 'static { - /// This function should be called to translate custom type into IoDelegate - fn into_delegate(self) -> IoDelegate; +/// Web3 rpc interface. +pub trait Web3: Sized + Send + Sync + 'static { + /// Returns current client version. + fn client_version(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. + fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("web3_clientVersion", Web3::client_version); + delegate + } } -/// eth rpc interface -pub trait Eth { - /// returns protocol version + +/// Eth rpc interface. +pub trait Eth: Sized + Send + Sync + 'static { + /// Returns protocol version. fn protocol_version(&self, _: Params) -> Result { rpcerr!() } - /// returns block author + /// Returns block author. fn author(&self, _: Params) -> Result { rpcerr!() } - /// returns current gas_price + /// Returns current gas_price. fn gas_price(&self, _: Params) -> Result { rpcerr!() } - /// returns highest block number + /// Returns highest block number. fn block_number(&self, _: Params) -> Result { rpcerr!() } + + /// Returns block with given index / hash. + fn block(&self, _: Params) -> Result { rpcerr!() } + + /// Returns true if client is actively mining new blocks. + fn is_mining(&self, _: Params) -> Result { rpcerr!() } + + /// Returns the number of hashes per second that the node is mining with. 
+ fn hashrate(&self, _: Params) -> Result { rpcerr!() } + + /// Returns the number of transactions in a block. + fn block_transaction_count(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. + fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("eth_protocolVersion", Eth::protocol_version); + delegate.add_method("eth_coinbase", Eth::author); + delegate.add_method("eth_gasPrice", Eth::gas_price); + delegate.add_method("eth_blockNumber", Eth::block_number); + delegate.add_method("eth_getBlockByNumber", Eth::block); + delegate.add_method("eth_mining", Eth::is_mining); + delegate.add_method("eth_hashrate", Eth::hashrate); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate + } } -impl IntoDelegate for D where D: Eth + Send + Sync + 'static { - fn into_delegate(self) -> IoDelegate { +/// Net rpc interface. +pub trait Net: Sized + Send + Sync + 'static { + /// Returns protocol version. + fn version(&self, _: Params) -> Result { rpcerr!() } + + /// Returns number of peers connected to node. + fn peer_count(&self, _: Params) -> Result { rpcerr!() } + + fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_protocolVersion", D::protocol_version); - delegate.add_method("eth_coinbase", D::author); - delegate.add_method("eth_gasPrice", D::gas_price); - delegate.add_method("eth_blockNumber", D::block_number); + delegate.add_method("peer_count", Net::version); + delegate.add_method("net_version", Net::version); delegate } } +pub struct Web3Client; + +impl Web3Client { + pub fn new() -> Self { Web3Client } +} + +impl Web3 for Web3Client { + fn client_version(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::String("parity/0.1.0/-/rust1.7-nightly".to_string())), + _ => Err(Error::invalid_params()) + } + } +} + pub struct EthClient { client: Arc>, } @@ -55,15 +110,35 @@ impl EthClient { } impl Eth for EthClient { + fn protocol_version(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(63)), + _ => Err(Error::invalid_params()) + } + } + + fn author(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::String(Address::new().to_hex())), + _ => Err(Error::invalid_params()) + } + } + fn block_number(&self, params: Params) -> Result { match params { Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), - _ => Err(Error::new(ErrorCode::InvalidParams)), + _ => Err(Error::invalid_params()) + } + } + + fn is_mining(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::Bool(false)), + _ => Err(Error::invalid_params()) } } } - pub struct HttpServer { handler: IoHandler, threads: usize @@ -77,8 +152,8 @@ impl HttpServer { } } - pub fn add_delegate(&mut self, delegate: I) where D: Send + Sync + 'static, I: IntoDelegate { - self.handler.add_delegate(delegate.into_delegate()); + pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { + self.handler.add_delegate(delegate); } pub fn start_async(self, addr: &str) { diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 3dd9b9c5c..f9f073718 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -32,8 +32,11 @@ fn setup_log() { #[cfg(feature = "rpc")] fn setup_rpc_server(client: Arc>) { - let mut server = ethrpc::HttpServer::new(1); - 
server.add_delegate(ethrpc::EthClient::new(client)); + use self::ethrpc::*; + + let mut server = HttpServer::new(1); + server.add_delegate(Web3Client::new().to_delegate()); + server.add_delegate(EthClient::new(client).to_delegate()); server.start_async("127.0.0.1:3030"); } From 201c4726a22bc0d9e5f32d519f119596233a8be1 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 21 Jan 2016 01:19:29 +0100 Subject: [PATCH 019/138] split rpc into multiple files --- src/bin/client/ethrpc.rs | 163 ------------------------------ src/bin/client/main.rs | 4 +- src/bin/client/rpc/impls/eth.rs | 48 +++++++++ src/bin/client/rpc/impls/mod.rs | 7 ++ src/bin/client/rpc/impls/net.rs | 0 src/bin/client/rpc/impls/web3.rs | 17 ++++ src/bin/client/rpc/mod.rs | 38 +++++++ src/bin/client/rpc/traits/eth.rs | 45 +++++++++ src/bin/client/rpc/traits/mod.rs | 8 ++ src/bin/client/rpc/traits/net.rs | 20 ++++ src/bin/client/rpc/traits/web3.rs | 16 +++ 11 files changed, 201 insertions(+), 165 deletions(-) delete mode 100644 src/bin/client/ethrpc.rs create mode 100644 src/bin/client/rpc/impls/eth.rs create mode 100644 src/bin/client/rpc/impls/mod.rs create mode 100644 src/bin/client/rpc/impls/net.rs create mode 100644 src/bin/client/rpc/impls/web3.rs create mode 100644 src/bin/client/rpc/mod.rs create mode 100644 src/bin/client/rpc/traits/eth.rs create mode 100644 src/bin/client/rpc/traits/mod.rs create mode 100644 src/bin/client/rpc/traits/net.rs create mode 100644 src/bin/client/rpc/traits/web3.rs diff --git a/src/bin/client/ethrpc.rs b/src/bin/client/ethrpc.rs deleted file mode 100644 index 09c93041f..000000000 --- a/src/bin/client/ethrpc.rs +++ /dev/null @@ -1,163 +0,0 @@ -extern crate jsonrpc_core; -extern crate jsonrpc_http_server; - -use std::sync::{Arc, RwLock}; -use rustc_serialize::hex::ToHex; -use self::jsonrpc_core::{IoHandler, IoDelegate, Params, Value, Error, ErrorCode}; -use ethcore::client::*; -use util::hash::*; - -macro_rules! rpcerr { - () => (Err(Error::internal_error())) -} - -/// Web3 rpc interface. -pub trait Web3: Sized + Send + Sync + 'static { - /// Returns current client version. - fn client_version(&self, _: Params) -> Result { rpcerr!() } - - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("web3_clientVersion", Web3::client_version); - delegate - } -} - - -/// Eth rpc interface. -pub trait Eth: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn protocol_version(&self, _: Params) -> Result { rpcerr!() } - - /// Returns block author. - fn author(&self, _: Params) -> Result { rpcerr!() } - - /// Returns current gas_price. - fn gas_price(&self, _: Params) -> Result { rpcerr!() } - - /// Returns highest block number. - fn block_number(&self, _: Params) -> Result { rpcerr!() } - - /// Returns block with given index / hash. - fn block(&self, _: Params) -> Result { rpcerr!() } - - /// Returns true if client is actively mining new blocks. - fn is_mining(&self, _: Params) -> Result { rpcerr!() } - - /// Returns the number of hashes per second that the node is mining with. - fn hashrate(&self, _: Params) -> Result { rpcerr!() } - - /// Returns the number of transactions in a block. - fn block_transaction_count(&self, _: Params) -> Result { rpcerr!() } - - /// Should be used to convert object to io delegate. 
- fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_protocolVersion", Eth::protocol_version); - delegate.add_method("eth_coinbase", Eth::author); - delegate.add_method("eth_gasPrice", Eth::gas_price); - delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_getBlockByNumber", Eth::block); - delegate.add_method("eth_mining", Eth::is_mining); - delegate.add_method("eth_hashrate", Eth::hashrate); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); - delegate - } -} - -/// Net rpc interface. -pub trait Net: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn version(&self, _: Params) -> Result { rpcerr!() } - - /// Returns number of peers connected to node. - fn peer_count(&self, _: Params) -> Result { rpcerr!() } - - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("peer_count", Net::version); - delegate.add_method("net_version", Net::version); - delegate - } -} - -pub struct Web3Client; - -impl Web3Client { - pub fn new() -> Self { Web3Client } -} - -impl Web3 for Web3Client { - fn client_version(&self, params: Params) -> Result { - match params { - Params::None => Ok(Value::String("parity/0.1.0/-/rust1.7-nightly".to_string())), - _ => Err(Error::invalid_params()) - } - } -} - -pub struct EthClient { - client: Arc>, -} - -impl EthClient { - pub fn new(client: Arc>) -> Self { - EthClient { - client: client - } - } -} - -impl Eth for EthClient { - fn protocol_version(&self, params: Params) -> Result { - match params { - Params::None => Ok(Value::U64(63)), - _ => Err(Error::invalid_params()) - } - } - - fn author(&self, params: Params) -> Result { - match params { - Params::None => Ok(Value::String(Address::new().to_hex())), - _ => Err(Error::invalid_params()) - } - } - - fn block_number(&self, params: Params) -> Result { - match params { - Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), - _ => Err(Error::invalid_params()) - } - } - - fn is_mining(&self, params: Params) -> Result { - match params { - Params::None => Ok(Value::Bool(false)), - _ => Err(Error::invalid_params()) - } - } -} - -pub struct HttpServer { - handler: IoHandler, - threads: usize -} - -impl HttpServer { - pub fn new(threads: usize) -> HttpServer { - HttpServer { - handler: IoHandler::new(), - threads: threads - } - } - - pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { - self.handler.add_delegate(delegate); - } - - pub fn start_async(self, addr: &str) { - let server = jsonrpc_http_server::Server::new(self.handler, self.threads); - server.start_async(addr) - } -} diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index f9f073718..bb19ca700 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -5,7 +5,7 @@ extern crate log; extern crate env_logger; #[cfg(feature = "rpc")] -mod ethrpc; +mod rpc; use std::io::stdin; use std::env; @@ -32,7 +32,7 @@ fn setup_log() { #[cfg(feature = "rpc")] fn setup_rpc_server(client: Arc>) { - use self::ethrpc::*; + use self::rpc::*; let mut server = HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); diff --git a/src/bin/client/rpc/impls/eth.rs b/src/bin/client/rpc/impls/eth.rs new file mode 100644 index 000000000..89c23419e --- /dev/null +++ b/src/bin/client/rpc/impls/eth.rs @@ -0,0 +1,48 @@ +use std::sync::{Arc, RwLock}; +use 
rustc_serialize::hex::ToHex; +use util::hash::*; +use ethcore::client::*; +use rpc::jsonrpc_core::*; +use rpc::Eth; + +pub struct EthClient { + client: Arc>, +} + +impl EthClient { + pub fn new(client: Arc>) -> Self { + EthClient { + client: client + } + } +} + +impl Eth for EthClient { + fn protocol_version(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(63)), + _ => Err(Error::invalid_params()) + } + } + + fn author(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::String(Address::new().to_hex())), + _ => Err(Error::invalid_params()) + } + } + + fn block_number(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), + _ => Err(Error::invalid_params()) + } + } + + fn is_mining(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::Bool(false)), + _ => Err(Error::invalid_params()) + } + } +} diff --git a/src/bin/client/rpc/impls/mod.rs b/src/bin/client/rpc/impls/mod.rs new file mode 100644 index 000000000..d229c5c13 --- /dev/null +++ b/src/bin/client/rpc/impls/mod.rs @@ -0,0 +1,7 @@ +//! Ethereum rpc interface implementation. +pub mod web3; +pub mod eth; +pub mod net; + +pub use self::web3::Web3Client; +pub use self::eth::EthClient; diff --git a/src/bin/client/rpc/impls/net.rs b/src/bin/client/rpc/impls/net.rs new file mode 100644 index 000000000..e69de29bb diff --git a/src/bin/client/rpc/impls/web3.rs b/src/bin/client/rpc/impls/web3.rs new file mode 100644 index 000000000..b7d8919e2 --- /dev/null +++ b/src/bin/client/rpc/impls/web3.rs @@ -0,0 +1,17 @@ +use rpc::jsonrpc_core::*; +use rpc::Web3; + +pub struct Web3Client; + +impl Web3Client { + pub fn new() -> Self { Web3Client } +} + +impl Web3 for Web3Client { + fn client_version(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::String("parity/0.1.0/-/rust1.7-nightly".to_string())), + _ => Err(Error::invalid_params()) + } + } +} diff --git a/src/bin/client/rpc/mod.rs b/src/bin/client/rpc/mod.rs new file mode 100644 index 000000000..1053904e9 --- /dev/null +++ b/src/bin/client/rpc/mod.rs @@ -0,0 +1,38 @@ +extern crate jsonrpc_core; +extern crate jsonrpc_http_server; + +use self::jsonrpc_core::{IoHandler, IoDelegate}; + +macro_rules! rpcerr { + () => (Err(Error::internal_error())) +} + +pub mod traits; +pub mod impls; + +pub use self::traits::{Web3, Eth, Net}; +pub use self::impls::*; + + +pub struct HttpServer { + handler: IoHandler, + threads: usize +} + +impl HttpServer { + pub fn new(threads: usize) -> HttpServer { + HttpServer { + handler: IoHandler::new(), + threads: threads + } + } + + pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { + self.handler.add_delegate(delegate); + } + + pub fn start_async(self, addr: &str) { + let server = jsonrpc_http_server::Server::new(self.handler, self.threads); + server.start_async(addr) + } +} diff --git a/src/bin/client/rpc/traits/eth.rs b/src/bin/client/rpc/traits/eth.rs new file mode 100644 index 000000000..8703d21fc --- /dev/null +++ b/src/bin/client/rpc/traits/eth.rs @@ -0,0 +1,45 @@ +//! Eth rpc interface. +use std::sync::Arc; +use rpc::jsonrpc_core::*; + +/// Eth rpc interface. +pub trait Eth: Sized + Send + Sync + 'static { + /// Returns protocol version. + fn protocol_version(&self, _: Params) -> Result { rpcerr!() } + + /// Returns block author. + fn author(&self, _: Params) -> Result { rpcerr!() } + + /// Returns current gas_price. 
+ fn gas_price(&self, _: Params) -> Result { rpcerr!() } + + /// Returns highest block number. + fn block_number(&self, _: Params) -> Result { rpcerr!() } + + /// Returns block with given index / hash. + fn block(&self, _: Params) -> Result { rpcerr!() } + + /// Returns true if client is actively mining new blocks. + fn is_mining(&self, _: Params) -> Result { rpcerr!() } + + /// Returns the number of hashes per second that the node is mining with. + fn hashrate(&self, _: Params) -> Result { rpcerr!() } + + /// Returns the number of transactions in a block. + fn block_transaction_count(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. + fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("eth_protocolVersion", Eth::protocol_version); + delegate.add_method("eth_coinbase", Eth::author); + delegate.add_method("eth_gasPrice", Eth::gas_price); + delegate.add_method("eth_blockNumber", Eth::block_number); + delegate.add_method("eth_getBlockByNumber", Eth::block); + delegate.add_method("eth_mining", Eth::is_mining); + delegate.add_method("eth_hashrate", Eth::hashrate); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate + } +} + diff --git a/src/bin/client/rpc/traits/mod.rs b/src/bin/client/rpc/traits/mod.rs new file mode 100644 index 000000000..83f70f17d --- /dev/null +++ b/src/bin/client/rpc/traits/mod.rs @@ -0,0 +1,8 @@ +//! Ethereum rpc interfaces. +pub mod web3; +pub mod eth; +pub mod net; + +pub use self::web3::Web3; +pub use self::eth::Eth; +pub use self::net::Net; diff --git a/src/bin/client/rpc/traits/net.rs b/src/bin/client/rpc/traits/net.rs new file mode 100644 index 000000000..7cb7f6bee --- /dev/null +++ b/src/bin/client/rpc/traits/net.rs @@ -0,0 +1,20 @@ +//! Net rpc interface. +use std::sync::Arc; +use rpc::jsonrpc_core::*; + +/// Net rpc interface. +pub trait Net: Sized + Send + Sync + 'static { + /// Returns protocol version. + fn version(&self, _: Params) -> Result { rpcerr!() } + + /// Returns number of peers connected to node. + fn peer_count(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. + fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("peer_count", Net::version); + delegate.add_method("net_version", Net::version); + delegate + } +} diff --git a/src/bin/client/rpc/traits/web3.rs b/src/bin/client/rpc/traits/web3.rs new file mode 100644 index 000000000..b71c867aa --- /dev/null +++ b/src/bin/client/rpc/traits/web3.rs @@ -0,0 +1,16 @@ +//! Web3 rpc interface. +use std::sync::Arc; +use rpc::jsonrpc_core::*; + +/// Web3 rpc interface. +pub trait Web3: Sized + Send + Sync + 'static { + /// Returns current client version. + fn client_version(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. 
+ fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("web3_clientVersion", Web3::client_version); + delegate + } +} From 85de41642e20e4c7c391af3962e475cb86931d5f Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 21 Jan 2016 11:25:39 +0100 Subject: [PATCH 020/138] rpc api in progress --- src/bin/client/main.rs | 4 ++- src/bin/client/rpc/impls/eth.rs | 42 +++++++++++++++++++++++++++++++- src/bin/client/rpc/impls/mod.rs | 3 ++- src/bin/client/rpc/impls/net.rs | 15 ++++++++++++ src/bin/client/rpc/mod.rs | 2 +- src/bin/client/rpc/traits/eth.rs | 20 +++++++++++++++ src/bin/client/rpc/traits/mod.rs | 2 +- 7 files changed, 83 insertions(+), 5 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index bb19ca700..fc1b10b8a 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -36,7 +36,9 @@ fn setup_rpc_server(client: Arc>) { let mut server = HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); - server.add_delegate(EthClient::new(client).to_delegate()); + server.add_delegate(EthClient::new(client.clone()).to_delegate()); + server.add_delegate(EthFilterClient::new(client).to_delegate()); + server.add_delegate(NetClient::new().to_delegate()); server.start_async("127.0.0.1:3030"); } diff --git a/src/bin/client/rpc/impls/eth.rs b/src/bin/client/rpc/impls/eth.rs index 89c23419e..1151909ec 100644 --- a/src/bin/client/rpc/impls/eth.rs +++ b/src/bin/client/rpc/impls/eth.rs @@ -3,7 +3,7 @@ use rustc_serialize::hex::ToHex; use util::hash::*; use ethcore::client::*; use rpc::jsonrpc_core::*; -use rpc::Eth; +use rpc::{Eth, EthFilter}; pub struct EthClient { client: Arc>, @@ -32,6 +32,13 @@ impl Eth for EthClient { } } + fn gas_price(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(0)), + _ => Err(Error::invalid_params()) + } + } + fn block_number(&self, params: Params) -> Result { match params { Params::None => Ok(Value::U64(self.client.read().unwrap().chain_info().best_block_number)), @@ -45,4 +52,37 @@ impl Eth for EthClient { _ => Err(Error::invalid_params()) } } + + fn hashrate(&self, params: Params) -> Result { + match params { + Params::None => Ok(Value::U64(0)), + _ => Err(Error::invalid_params()) + } + } +} + +pub struct EthFilterClient { + client: Arc> +} + +impl EthFilterClient { + pub fn new(client: Arc>) -> Self { + EthFilterClient { + client: client + } + } +} + +impl EthFilter for EthFilterClient { + fn new_block_filter(&self, _params: Params) -> Result { + Ok(Value::U64(0)) + } + + fn new_pending_transaction_filter(&self, _params: Params) -> Result { + Ok(Value::U64(1)) + } + + fn filter_changes(&self, _: Params) -> Result { + Ok(Value::String(self.client.read().unwrap().chain_info().best_block_hash.to_hex())) + } } diff --git a/src/bin/client/rpc/impls/mod.rs b/src/bin/client/rpc/impls/mod.rs index d229c5c13..813a168fd 100644 --- a/src/bin/client/rpc/impls/mod.rs +++ b/src/bin/client/rpc/impls/mod.rs @@ -4,4 +4,5 @@ pub mod eth; pub mod net; pub use self::web3::Web3Client; -pub use self::eth::EthClient; +pub use self::eth::{EthClient, EthFilterClient}; +pub use self::net::NetClient; diff --git a/src/bin/client/rpc/impls/net.rs b/src/bin/client/rpc/impls/net.rs index e69de29bb..6e528d156 100644 --- a/src/bin/client/rpc/impls/net.rs +++ b/src/bin/client/rpc/impls/net.rs @@ -0,0 +1,15 @@ +//! Net rpc implementation. 
+use rpc::jsonrpc_core::*; +use rpc::Net; + +pub struct NetClient; + +impl NetClient { + pub fn new() -> Self { NetClient } +} + +impl Net for NetClient { + fn peer_count(&self, _params: Params) -> Result { + Ok(Value::U64(0)) + } +} diff --git a/src/bin/client/rpc/mod.rs b/src/bin/client/rpc/mod.rs index 1053904e9..bf18e4b5f 100644 --- a/src/bin/client/rpc/mod.rs +++ b/src/bin/client/rpc/mod.rs @@ -10,7 +10,7 @@ macro_rules! rpcerr { pub mod traits; pub mod impls; -pub use self::traits::{Web3, Eth, Net}; +pub use self::traits::{Web3, Eth, EthFilter, Net}; pub use self::impls::*; diff --git a/src/bin/client/rpc/traits/eth.rs b/src/bin/client/rpc/traits/eth.rs index 8703d21fc..dfc72e89a 100644 --- a/src/bin/client/rpc/traits/eth.rs +++ b/src/bin/client/rpc/traits/eth.rs @@ -43,3 +43,23 @@ pub trait Eth: Sized + Send + Sync + 'static { } } +// TODO: do filters api properly if we commit outselves to polling again... +pub trait EthFilter: Sized + Send + Sync + 'static { + /// Returns id of new block filter + fn new_block_filter(&self, _: Params) -> Result { rpcerr!() } + + /// Returns id of new block filter + fn new_pending_transaction_filter(&self, _: Params) -> Result { rpcerr!() } + + /// Returns filter changes since last poll + fn filter_changes(&self, _: Params) -> Result { rpcerr!() } + + /// Should be used to convert object to io delegate. + fn to_delegate(self) -> IoDelegate { + let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); + delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); + delegate.add_method("eth_getFilterChanges", EthFilter::new_block_filter); + delegate + } +} diff --git a/src/bin/client/rpc/traits/mod.rs b/src/bin/client/rpc/traits/mod.rs index 83f70f17d..2fa52d538 100644 --- a/src/bin/client/rpc/traits/mod.rs +++ b/src/bin/client/rpc/traits/mod.rs @@ -4,5 +4,5 @@ pub mod eth; pub mod net; pub use self::web3::Web3; -pub use self::eth::Eth; +pub use self::eth::{Eth, EthFilter}; pub use self::net::Net; From 124cfcc11ef1a170a609c261ab0bf6fb5c016b16 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Thu, 21 Jan 2016 16:08:09 +0100 Subject: [PATCH 021/138] Possibility to declare "heavy tests" --- Cargo.toml | 3 ++- src/evm/interpreter.rs | 6 +++--- src/null_engine.rs | 2 +- src/tests/executive.rs | 5 ++--- src/tests/state.rs | 10 +++++----- src/tests/test_common.rs | 38 ++++++++++++++++++++++++-------------- src/tests/transaction.rs | 4 ++-- 7 files changed, 39 insertions(+), 29 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 04c4bf956..5123ec977 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,8 @@ num_cpus = "0.2" [features] jit = ["evmjit"] -evm_debug = [] +test-heavy = [] +evm-debug = [] [[bin]] name = "client" diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 88823cbea..a384f3791 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -7,19 +7,19 @@ use super::instructions::Instruction; use std::marker::Copy; use evm::{MessageCallResult, ContractCreateResult}; -#[cfg(not(feature = "evm_debug"))] +#[cfg(not(feature = "evm-debug"))] macro_rules! evm_debug { ($x: expr) => {} } -#[cfg(feature = "evm_debug")] +#[cfg(feature = "evm-debug")] macro_rules! 
evm_debug { ($x: expr) => { $x } } -#[cfg(feature = "evm_debug")] +#[cfg(feature = "evm-debug")] fn color(instruction: Instruction, name: &'static str) -> String { let c = instruction as usize % 6; let colors = [31, 34, 33, 32, 35, 36]; diff --git a/src/null_engine.rs b/src/null_engine.rs index 3b03508a2..3c829606c 100644 --- a/src/null_engine.rs +++ b/src/null_engine.rs @@ -11,7 +11,7 @@ pub struct NullEngine { } impl NullEngine { - /// TODO [Tomusdrw] Please document me + /// Returns new instance of NullEngine with default VM Factory pub fn new_boxed(spec: Spec) -> Box { Box::new(NullEngine{ spec: spec, diff --git a/src/tests/executive.rs b/src/tests/executive.rs index fe428e199..6281b3eeb 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -277,11 +277,10 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { declare_test!{ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"} declare_test!{ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperationTest"} // this one crashes with some vm internal error. Separately they pass. -declare_test_ignore!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} +declare_test!{ignore => ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} declare_test!{ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfoTest"} declare_test!{ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperationsTest"} -// this one take way too long. -declare_test_ignore!{ExecutiveTests_vmInputLimits, "VMTests/vmInputLimits"} +declare_test!{heavy => ExecutiveTests_vmInputLimits, "VMTests/vmInputLimits"} declare_test!{ExecutiveTests_vmLogTest, "VMTests/vmLogTest"} declare_test!{ExecutiveTests_vmPerformanceTest, "VMTests/vmPerformanceTest"} declare_test!{ExecutiveTests_vmPushDupSwapTest, "VMTests/vmPushDupSwapTest"} diff --git a/src/tests/state.rs b/src/tests/state.rs index 119e7037a..03a5e05e1 100644 --- a/src/tests/state.rs +++ b/src/tests/state.rs @@ -73,7 +73,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{StateTests_stBlockHashTest, "StateTests/stBlockHashTest"} declare_test!{StateTests_stCallCodes, "StateTests/stCallCodes"} -declare_test_ignore!{StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} //<< Out of stack +declare_test!{ignore => StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} //<< Out of stack declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} //<< FAIL - gas too high declare_test!{StateTests_stExample, "StateTests/stExample"} declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"} @@ -81,12 +81,12 @@ declare_test!{StateTests_stLogTests, "StateTests/stLogTests"} declare_test!{StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"} declare_test!{StateTests_stMemoryTest, "StateTests/stMemoryTest"} declare_test!{StateTests_stPreCompiledContracts, "StateTests/stPreCompiledContracts"} -declare_test_ignore!{StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long -declare_test_ignore!{StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack +declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long +declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack declare_test!{StateTests_stRefundTest, "StateTests/stRefundTest"} declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"} 
-declare_test_ignore!{StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Signal 11 -declare_test_ignore!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Signal 11 +declare_test!{ignore => StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Signal 11 +declare_test!{ignore => StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Signal 11 declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"} declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"} declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"} diff --git a/src/tests/test_common.rs b/src/tests/test_common.rs index 15c5364c4..548158fb8 100644 --- a/src/tests/test_common.rs +++ b/src/tests/test_common.rs @@ -1,24 +1,34 @@ pub use common::*; +macro_rules! test { + ($name: expr) => { + assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).len() == 0); + } +} + #[macro_export] macro_rules! declare_test { - ($id: ident, $name: expr) => { - #[test] - #[allow(non_snake_case)] - fn $id() { - assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).len() == 0); - } - }; -} - -#[macro_export] -macro_rules! declare_test_ignore { - ($id: ident, $name: expr) => { - #[test] + (ignore => $id: ident, $name: expr) => { #[ignore] + #[test] #[allow(non_snake_case)] fn $id() { - assert!(do_json_test(include_bytes!(concat!("../../res/ethereum/tests/", $name, ".json"))).len() == 0); + test!($name); } }; + (heavy => $id: ident, $name: expr) => { + #[cfg(feature = "test-heavy")] + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name); + } + }; + ($id: ident, $name: expr) => { + #[test] + #[allow(non_snake_case)] + fn $id() { + test!($name); + } + } } diff --git a/src/tests/transaction.rs b/src/tests/transaction.rs index d4836ca84..1603ad55a 100644 --- a/src/tests/transaction.rs +++ b/src/tests/transaction.rs @@ -70,9 +70,9 @@ declare_test!{TransactionTests/Homestead/ttWrongRLPTransaction} declare_test!{TransactionTests/RandomTests/tr201506052141PYTHON}*/ declare_test!{TransactionTests_ttTransactionTest, "TransactionTests/ttTransactionTest"} -declare_test_ignore!{TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} +declare_test!{ignore => TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} declare_test!{TransactionTests_ttWrongRLPTransaction, "TransactionTests/ttWrongRLPTransaction"} declare_test!{TransactionTests_Homestead_ttTransactionTest, "TransactionTests/Homestead/ttTransactionTest"} -declare_test_ignore!{TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} +declare_test!{ignore => TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} declare_test!{TransactionTests_Homestead_ttWrongRLPTransaction, "TransactionTests/Homestead/ttWrongRLPTransaction"} declare_test!{TransactionTests_RandomTests_tr201506052141PYTHON, "TransactionTests/RandomTests/tr201506052141PYTHON"} From e514d3d80fc196851cf77cc6e82ea48300a304ab Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 16:48:37 +0100 Subject: [PATCH 022/138] Multithreaded event loop --- ethash/src/lib.rs | 12 +- src/bin/client.rs | 11 +- src/client.rs | 6 +- src/service.rs | 10 +- src/sync/chain.rs | 4 + src/sync/io.rs | 10 +- src/sync/mod.rs | 48 +-- util/Cargo.toml | 1 + util/src/io/mod.rs | 32 +- util/src/io/service.rs | 255 +++++++++----- util/src/lib.rs | 1 + util/src/network/connection.rs | 55 ++- 
util/src/network/handshake.rs | 36 +- util/src/network/host.rs | 626 ++++++++++++++++++--------------- util/src/network/mod.rs | 28 +- util/src/network/service.rs | 17 +- util/src/network/session.rs | 16 +- 17 files changed, 661 insertions(+), 507 deletions(-) diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index f7b6d2308..e87ee1a03 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -30,11 +30,13 @@ impl EthashManager { /// `nonce` - The nonce to pack into the mix pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork { let epoch = block_number / ETHASH_EPOCH_LENGTH; - if !self.lights.read().unwrap().contains_key(&epoch) { - let mut lights = self.lights.write().unwrap(); // obtain write lock - if !lights.contains_key(&epoch) { - let light = Light::new(block_number); - lights.insert(epoch, light); + while !self.lights.read().unwrap().contains_key(&epoch) { + if let Ok(mut lights) = self.lights.try_write() + { + if !lights.contains_key(&epoch) { + let light = Light::new(block_number); + lights.insert(epoch, light); + } } } self.lights.read().unwrap().get(&epoch).unwrap().compute(header_hash, nonce) diff --git a/src/bin/client.rs b/src/bin/client.rs index a862737be..92106aad4 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -29,7 +29,7 @@ fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); - let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0 }); + let io_handler = Arc::new(ClientIoHandler { client: service.client() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); loop { let mut cmd = String::new(); @@ -43,16 +43,15 @@ fn main() { struct ClientIoHandler { client: Arc>, - timer: TimerToken, } impl IoHandler for ClientIoHandler { - fn initialize<'s>(&'s mut self, io: &mut IoContext<'s, NetSyncMessage>) { - self.timer = io.register_timer(5000).expect("Error registering timer"); + fn initialize(&self, io: &IoContext) { + io.register_timer(0, 5000).expect("Error registering timer"); } - fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) { - if self.timer == timer { + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer == 0 { println!("Chain info: {:?}", self.client.read().unwrap().deref().chain_info()); } } diff --git a/src/client.rs b/src/client.rs index e02ab37d8..87bdc4416 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,6 +1,5 @@ use util::*; use rocksdb::{Options, DB}; -use rocksdb::DBCompactionStyle::DBUniversalCompaction; use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; @@ -113,7 +112,9 @@ impl Client { pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let mut opts = Options::new(); + opts.set_max_open_files(256); opts.create_if_missing(true); + /* opts.set_max_open_files(256); opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); @@ -131,6 +132,7 @@ impl Client { opts.set_max_background_flushes(4); opts.set_filter_deletes(false); opts.set_disable_auto_compactions(true); + */ let mut state_path = path.to_path_buf(); state_path.push("state"); @@ -219,7 +221,7 @@ impl Client { return; } } - info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + //info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } } diff --git a/src/service.rs b/src/service.rs index 
036c99bc4..02f813c2f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ impl ClientService { dir.push(H64::from(spec.genesis_header().hash()).hex()); let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel())))); EthSync::register(&mut net_service, client.clone()); - let client_io = Box::new(ClientIoHandler { + let client_io = Arc::new(ClientIoHandler { client: client.clone() }); try!(net_service.io().register_handler(client_io)); @@ -48,14 +48,14 @@ struct ClientIoHandler { } impl IoHandler for ClientIoHandler { - fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { + fn initialize(&self, _io: &IoContext) { } - fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) { + fn message(&self, _io: &IoContext, net_message: &NetSyncMessage) { match net_message { - &mut UserMessage(ref mut message) => { + &UserMessage(ref message) => { match message { - &mut SyncMessage::BlockVerified => { + &SyncMessage::BlockVerified => { self.client.write().unwrap().import_verified_blocks(); }, _ => {}, // ignore other messages diff --git a/src/sync/chain.rs b/src/sync/chain.rs index 15fe6d1f9..af954e4ad 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -424,6 +424,10 @@ impl ChainSync { let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty; if difficulty > peer_difficulty { trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h); + { + let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); + peer.latest = header_view.sha3(); + } self.sync_peer(io, peer_id, true); } } diff --git a/src/sync/io.rs b/src/sync/io.rs index affcbc0d7..f49591a9f 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -22,14 +22,14 @@ pub trait SyncIo { } /// Wraps `NetworkContext` and the blockchain client -pub struct NetSyncIo<'s, 'h, 'io> where 'h: 's, 'io: 'h { - network: &'s mut NetworkContext<'h, 'io, SyncMessage>, +pub struct NetSyncIo<'s, 'h> where 'h: 's { + network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient } -impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { +impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. 
- pub fn new(network: &'s mut NetworkContext<'h, 'io, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h,'io> { + pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, @@ -37,7 +37,7 @@ impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { } } -impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { +impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { fn disable_peer(&mut self, peer_id: PeerId) { self.network.disable_peer(peer_id); } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index da91a6889..5bcaf656e 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -26,9 +26,8 @@ use std::ops::*; use std::sync::*; use client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; -use util::TimerToken; -use util::Bytes; use sync::chain::ChainSync; +use util::{Bytes, TimerToken}; use sync::io::NetSyncIo; mod chain; @@ -38,10 +37,13 @@ mod range_collection; #[cfg(test)] mod tests; +const SYNC_TIMER: usize = 0; + /// Message type for external events +#[derive(Clone)] pub enum SyncMessage { /// New block has been imported into the blockchain - NewChainBlock(Bytes), + NewChainBlock(Bytes), //TODO: use Cow /// A block is ready BlockVerified, } @@ -53,7 +55,7 @@ pub struct EthSync { /// Shared blockchain client. TODO: this should evetually become an IPC endpoint chain: Arc>, /// Sync strategy - sync: ChainSync + sync: RwLock } pub use self::chain::SyncStatus; @@ -61,52 +63,50 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service pub fn register(service: &mut NetworkService, chain: Arc>) { - let sync = Box::new(EthSync { + let sync = Arc::new(EthSync { chain: chain, - sync: ChainSync::new(), + sync: RwLock::new(ChainSync::new()), }); - service.register_protocol(sync, "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); + service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); } /// Get sync status pub fn status(&self) -> SyncStatus { - self.sync.status() + self.sync.read().unwrap().status() } /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { - self.sync.abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } /// Restart sync pub fn restart(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } } impl NetworkProtocolHandler for EthSync { - fn initialize(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); - io.register_timer(1000).unwrap(); + fn initialize(&self, io: &NetworkContext) { + io.register_timer(SYNC_TIMER, 1000).unwrap(); } - fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); } - fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_connected(&mut 
NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } - fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } - fn timeout(&mut self, io: &mut NetworkContext, _timer: TimerToken) { - self.sync.maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); - } - - fn message(&mut self, _io: &mut NetworkContext, _message: &SyncMessage) { + fn timeout(&self, io: &NetworkContext, timer: TimerToken) { + if timer == SYNC_TIMER { + self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + } } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 02fdad17f..15dc9523a 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -22,6 +22,7 @@ rust-crypto = "0.2.34" elastic-array = "0.4" heapsize = "0.2" itertools = "0.4" +crossbeam = "0.2" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 23a8509cc..71b882fb8 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -36,13 +36,16 @@ /// } /// ``` mod service; +mod worker; + +use mio::{EventLoop, Token}; #[derive(Debug)] pub enum IoError { Mio(::std::io::Error), } -impl From<::mio::NotifyError>> for IoError where Message: Send { +impl From<::mio::NotifyError>> for IoError where Message: Send + Clone { fn from(_err: ::mio::NotifyError>) -> IoError { IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error")) } @@ -51,27 +54,32 @@ impl From<::mio::NotifyError>> for IoError /// Generic IO handler. /// All the handler function are called from within IO event loop. /// `Message` type is used as notification data -pub trait IoHandler: Send where Message: Send + 'static { +pub trait IoHandler: Send + Sync where Message: Send + Sync + Clone + 'static { /// Initialize the handler - fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, Message>) {} + fn initialize(&self, _io: &IoContext) {} /// Timer function called after a timeout created with `HandlerIo::timeout`. - fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _timer: TimerToken) {} + fn timeout(&self, _io: &IoContext, _timer: TimerToken) {} /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. 
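// Editor's note: a minimal, self-contained sketch (illustrative names only) of
// the pattern the new &self-based trait signatures push implementors towards,
// as seen in EthSync above: the handler is shared behind an Arc and its mutable
// state (here a toy counter, in the patch a ChainSync) moves behind a RwLock.
use std::sync::{Arc, RwLock};

struct SyncState { connected_peers: usize }

struct Handler { sync: RwLock<SyncState> }

impl Handler {
    // Callbacks take &self; mutation goes through the lock.
    fn connected(&self) { self.sync.write().unwrap().connected_peers += 1; }
    fn status(&self) -> usize { self.sync.read().unwrap().connected_peers }
}

fn main() {
    let handler = Arc::new(Handler { sync: RwLock::new(SyncState { connected_peers: 0 }) });
    let shared = handler.clone(); // the same instance can be handed to the IO service
    shared.connected();
    assert_eq!(handler.status(), 1);
}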
- fn message<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _message: &'s mut Message) {} // TODO: make message immutable and provide internal channel for adding network handler + fn message(&self, _io: &IoContext, _message: &Message) {} /// Called when an IO stream gets closed - fn stream_hup<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_hup(&self, _io: &IoContext, _stream: StreamToken) {} /// Called when an IO stream can be read from - fn stream_readable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_readable(&self, _io: &IoContext, _stream: StreamToken) {} /// Called when an IO stream can be written to - fn stream_writable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_writable(&self, _io: &IoContext, _stream: StreamToken) {} + /// Register a new stream with the event loop + fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} + /// Re-register a stream with the event loop + fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} } pub type TimerToken = service::TimerToken; pub type StreamToken = service::StreamToken; -pub type IoContext<'s, M> = service::IoContext<'s, M>; -pub type IoService = service::IoService; -pub type IoChannel = service::IoChannel; -//pub const USER_TOKEN_START: usize = service::USER_TOKEN; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12) +pub use io::service::IoContext; +pub type IoService = service::IoService; +pub type IoChannel = service::IoChannel; +pub type IoManager = service::IoManager; +pub const TOKENS_PER_HANDLER: usize = service::TOKENS_PER_HANDLER; #[cfg(test)] mod tests { diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 4a96d19a7..a229e4022 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -1,148 +1,221 @@ +use std::sync::*; use std::thread::{self, JoinHandle}; +use std::collections::HashMap; use mio::*; -use mio::util::{Slab}; use hash::*; use rlp::*; use error::*; use io::{IoError, IoHandler}; +use arrayvec::*; +use crossbeam::sync::chase_lev; +use io::worker::{Worker, Work, WorkType}; pub type TimerToken = usize; pub type StreamToken = usize; +pub type HandlerId = usize; // Tokens -const MAX_USER_TIMERS: usize = 32; -const USER_TIMER: usize = 0; -const LAST_USER_TIMER: usize = USER_TIMER + MAX_USER_TIMERS - 1; -//const USER_TOKEN: usize = LAST_USER_TIMER + 1; +pub const TOKENS_PER_HANDLER: usize = 16384; /// Messages used to communicate with the event loop from other threads. -pub enum IoMessage where Message: Send + Sized { +#[derive(Clone)] +pub enum IoMessage where Message: Send + Clone + Sized { /// Shutdown the event loop Shutdown, /// Register a new protocol handler. AddHandler { - handler: Box+Send>, + handler: Arc+Send>, + }, + AddTimer { + handler_id: HandlerId, + token: TimerToken, + delay: u64, + }, + RemoveTimer { + handler_id: HandlerId, + token: TimerToken, + }, + RegisterStream { + handler_id: HandlerId, + token: StreamToken, + }, + UpdateStreamRegistration { + handler_id: HandlerId, + token: StreamToken, }, /// Broadcast a message across all protocol handlers. UserMessage(Message) } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. -pub struct IoContext<'s, Message> where Message: Send + 'static { - timers: &'s mut Slab, - /// Low leve MIO Event loop for custom handler registration. 
- pub event_loop: &'s mut EventLoop>, +pub struct IoContext where Message: Send + Clone + 'static { + pub channel: IoChannel, + pub handler: HandlerId, } -impl<'s, Message> IoContext<'s, Message> where Message: Send + 'static { +impl IoContext where Message: Send + Clone + 'static { /// Create a new IO access point. Takes references to all the data that can be updated within the IO handler. - fn new(event_loop: &'s mut EventLoop>, timers: &'s mut Slab) -> IoContext<'s, Message> { + pub fn new(channel: IoChannel, handler: HandlerId) -> IoContext { IoContext { - event_loop: event_loop, - timers: timers, + handler: handler, + channel: channel, } } - /// Register a new IO timer. Returns a new timer token. 'IoHandler::timeout' will be called with the token. - pub fn register_timer(&mut self, ms: u64) -> Result { - match self.timers.insert(UserTimer { + /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. + pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::AddTimer { + token: token, delay: ms, - }) { - Ok(token) => { - self.event_loop.timeout_ms(token, ms).expect("Error registering user timer"); - Ok(token.as_usize()) - }, - _ => { panic!("Max timers reached") } - } + handler_id: self.handler, + })); + Ok(()) + } + + /// Delete a timer. + pub fn clear_timer(&self, token: TimerToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::RemoveTimer { + token: token, + handler_id: self.handler, + })); + Ok(()) + } + /// Register a new IO stream. + pub fn register_stream(&self, token: StreamToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::RegisterStream { + token: token, + handler_id: self.handler, + })); + Ok(()) + } + + /// Reregister an IO stream. + pub fn update_registration(&self, token: StreamToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::UpdateStreamRegistration { + token: token, + handler_id: self.handler, + })); + Ok(()) } /// Broadcast a message to other IO clients - pub fn message(&mut self, message: Message) { - match self.event_loop.channel().send(IoMessage::UserMessage(message)) { - Ok(_) => {} - Err(e) => { panic!("Error sending io message {:?}", e); } - } + pub fn message(&self, message: Message) { + self.channel.send(message).expect("Error seding message"); } } +#[derive(Clone)] struct UserTimer { delay: u64, + timeout: Timeout, } /// Root IO handler. Manages user handlers, messages and IO timers. -pub struct IoManager where Message: Send { - timers: Slab, - handlers: Vec>>, +pub struct IoManager where Message: Send + Sync { + timers: Arc>>, + handlers: Vec>>, + _workers: Vec, + worker_channel: chase_lev::Worker>, + work_ready: Arc, } -impl IoManager where Message: Send + 'static { +impl IoManager where Message: Send + Sync + Clone + 'static { /// Creates a new instance and registers it with the event loop. 
pub fn start(event_loop: &mut EventLoop>) -> Result<(), UtilError> { + let (worker, stealer) = chase_lev::deque(); + let num_workers = 4; + let work_ready_mutex = Arc::new(Mutex::new(false)); + let work_ready = Arc::new(Condvar::new()); + let workers = (0..num_workers).map(|i| + Worker::new(i, stealer.clone(), IoChannel::new(event_loop.channel()), work_ready.clone(), work_ready_mutex.clone())).collect(); + let mut io = IoManager { - timers: Slab::new_starting_at(Token(USER_TIMER), MAX_USER_TIMERS), + timers: Arc::new(RwLock::new(HashMap::new())), handlers: Vec::new(), + worker_channel: worker, + _workers: workers, + work_ready: work_ready, }; try!(event_loop.run(&mut io)); Ok(()) } } -impl Handler for IoManager where Message: Send + 'static { +impl Handler for IoManager where Message: Send + Clone + Sync + 'static { type Timeout = Token; type Message = IoMessage; - fn ready(&mut self, event_loop: &mut EventLoop, token: Token, events: EventSet) { + fn ready(&mut self, _event_loop: &mut EventLoop, token: Token, events: EventSet) { + let handler_index = token.as_usize() / TOKENS_PER_HANDLER; + let token_id = token.as_usize() % TOKENS_PER_HANDLER; + if handler_index >= self.handlers.len() { + panic!("Unexpected stream token: {}", token.as_usize()); + } + let handler = self.handlers[handler_index].clone(); + if events.is_hup() { - for h in self.handlers.iter_mut() { - h.stream_hup(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } - else if events.is_readable() { - for h in self.handlers.iter_mut() { - h.stream_readable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } - else if events.is_writable() { - for h in self.handlers.iter_mut() { - h.stream_writable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); + self.worker_channel.push(Work { work_type: WorkType::Hup, token: token_id, handler: handler.clone(), handler_id: handler_index }); + } + else { + if events.is_readable() { + self.worker_channel.push(Work { work_type: WorkType::Readable, token: token_id, handler: handler.clone(), handler_id: handler_index }); + } + if events.is_writable() { + self.worker_channel.push(Work { work_type: WorkType::Writable, token: token_id, handler: handler.clone(), handler_id: handler_index }); } } + self.work_ready.notify_all(); } fn timeout(&mut self, event_loop: &mut EventLoop, token: Token) { - match token.as_usize() { - USER_TIMER ... LAST_USER_TIMER => { - let delay = { - let timer = self.timers.get_mut(token).expect("Unknown user timer token"); - timer.delay - }; - for h in self.handlers.iter_mut() { - h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - event_loop.timeout_ms(token, delay).expect("Error re-registering user timer"); - } - _ => { // Just pass the event down. IoHandler is supposed to re-register it if required. 
- for h in self.handlers.iter_mut() { - h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } + let handler_index = token.as_usize() / TOKENS_PER_HANDLER; + let token_id = token.as_usize() % TOKENS_PER_HANDLER; + if handler_index >= self.handlers.len() { + panic!("Unexpected timer token: {}", token.as_usize()); + } + if let Some(timer) = self.timers.read().unwrap().get(&token.as_usize()) { + event_loop.timeout_ms(token, timer.delay).expect("Error re-registering user timer"); + let handler = self.handlers[handler_index].clone(); + self.worker_channel.push(Work { work_type: WorkType::Timeout, token: token_id, handler: handler, handler_id: handler_index }); + self.work_ready.notify_all(); } } fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { - let mut m = msg; - match m { + match msg { IoMessage::Shutdown => event_loop.shutdown(), - IoMessage::AddHandler { - handler, - } => { - self.handlers.push(handler); - self.handlers.last_mut().unwrap().initialize(&mut IoContext::new(event_loop, &mut self.timers)); + IoMessage::AddHandler { handler } => { + let handler_id = { + self.handlers.push(handler.clone()); + self.handlers.len() - 1 + }; + handler.initialize(&IoContext::new(IoChannel::new(event_loop.channel()), handler_id)); }, - IoMessage::UserMessage(ref mut data) => { - for h in self.handlers.iter_mut() { - h.message(&mut IoContext::new(event_loop, &mut self.timers), data); + IoMessage::AddTimer { handler_id, token, delay } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + let timeout = event_loop.timeout_ms(Token(timer_id), delay).expect("Error registering user timer"); + self.timers.write().unwrap().insert(timer_id, UserTimer { delay: delay, timeout: timeout }); + }, + IoMessage::RemoveTimer { handler_id, token } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + if let Some(timer) = self.timers.write().unwrap().remove(&timer_id) { + event_loop.clear_timeout(timer.timeout); } + }, + IoMessage::RegisterStream { handler_id, token } => { + let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); + handler.register_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); + }, + IoMessage::UpdateStreamRegistration { handler_id, token } => { + let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); + handler.update_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); + }, + IoMessage::UserMessage(data) => { + for n in 0 .. self.handlers.len() { + let handler = self.handlers[n].clone(); + self.worker_channel.push(Work { work_type: WorkType::Message(data.clone()), token: 0, handler: handler, handler_id: n }); + } + self.work_ready.notify_all(); } } } @@ -150,11 +223,19 @@ impl Handler for IoManager where Message: Send + 'static { /// Allows sending messages into the event loop. All the IO handlers will get the message /// in the `message` callback. 
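// Editor's note: a self-contained sketch (not part of the patch) of the token
// arithmetic IoManager uses above: each handler owns a contiguous block of
// TOKENS_PER_HANDLER tokens, so one mio Token encodes both which handler it
// belongs to and the handler-local timer/stream token.
const TOKENS_PER_HANDLER: usize = 16384;

fn global_token(handler_id: usize, local_token: usize) -> usize {
    handler_id * TOKENS_PER_HANDLER + local_token
}

fn split_token(global: usize) -> (usize, usize) {
    (global / TOKENS_PER_HANDLER, global % TOKENS_PER_HANDLER)
}

fn main() {
    let token = global_token(2, 5);
    assert_eq!(split_token(token), (2, 5));
}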
-pub struct IoChannel where Message: Send { +pub struct IoChannel where Message: Send + Clone{ channel: Option>> } -impl IoChannel where Message: Send { +impl Clone for IoChannel where Message: Send + Clone { + fn clone(&self) -> IoChannel { + IoChannel { + channel: self.channel.clone() + } + } +} + +impl IoChannel where Message: Send + Clone { /// Send a msessage through the channel pub fn send(&self, message: Message) -> Result<(), IoError> { if let Some(ref channel) = self.channel { @@ -163,20 +244,30 @@ impl IoChannel where Message: Send { Ok(()) } + pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { + if let Some(ref channel) = self.channel { + try!(channel.send(message)) + } + Ok(()) + } /// Create a new channel to connected to event loop. pub fn disconnected() -> IoChannel { IoChannel { channel: None } } + + fn new(channel: Sender>) -> IoChannel { + IoChannel { channel: Some(channel) } + } } /// General IO Service. Starts an event loop and dispatches IO requests. /// 'Message' is a notification message type -pub struct IoService where Message: Send + 'static { +pub struct IoService where Message: Send + Sync + Clone + 'static { thread: Option>, - host_channel: Sender> + host_channel: Sender>, } -impl IoService where Message: Send + 'static { +impl IoService where Message: Send + Sync + Clone + 'static { /// Starts IO event loop pub fn start() -> Result, UtilError> { let mut event_loop = EventLoop::new().unwrap(); @@ -191,7 +282,7 @@ impl IoService where Message: Send + 'static { } /// Regiter a IO hadnler with the event loop. - pub fn register_handler(&mut self, handler: Box+Send>) -> Result<(), IoError> { + pub fn register_handler(&mut self, handler: Arc+Send>) -> Result<(), IoError> { try!(self.host_channel.send(IoMessage::AddHandler { handler: handler, })); @@ -210,7 +301,7 @@ impl IoService where Message: Send + 'static { } } -impl Drop for IoService where Message: Send { +impl Drop for IoService where Message: Send + Sync + Clone { fn drop(&mut self) { self.host_channel.send(IoMessage::Shutdown).unwrap(); self.thread.take().unwrap().join().unwrap(); diff --git a/util/src/lib.rs b/util/src/lib.rs index 4bc47e61c..2161af34f 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -51,6 +51,7 @@ extern crate crypto as rcrypto; extern crate secp256k1; extern crate arrayvec; extern crate elastic_array; +extern crate crossbeam; pub mod standard; #[macro_use] diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index f11c10384..a2c77e8cf 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -1,5 +1,5 @@ use std::collections::VecDeque; -use mio::{Handler, Token, EventSet, EventLoop, Timeout, PollOpt, TryRead, TryWrite}; +use mio::{Handler, Token, EventSet, EventLoop, PollOpt, TryRead, TryWrite}; use mio::tcp::*; use hash::*; use sha3::*; @@ -7,6 +7,7 @@ use bytes::*; use rlp::*; use std::io::{self, Cursor, Read}; use error::*; +use io::{IoContext, StreamToken}; use network::error::NetworkError; use network::handshake::Handshake; use crypto; @@ -17,11 +18,12 @@ use rcrypto::buffer::*; use tiny_keccak::Keccak; const ENCRYPTED_HEADER_LEN: usize = 32; +const RECIEVE_PAYLOAD_TIMEOUT: u64 = 30000; /// Low level tcp connection pub struct Connection { /// Connection id (token) - pub token: Token, + pub token: StreamToken, /// Network socket pub socket: TcpStream, /// Receive buffer @@ -45,14 +47,14 @@ pub enum WriteStatus { impl Connection { /// Create a new connection with given id and socket. 
- pub fn new(token: Token, socket: TcpStream) -> Connection { + pub fn new(token: StreamToken, socket: TcpStream) -> Connection { Connection { token: token, socket: socket, send_queue: VecDeque::new(), rec_buf: Bytes::new(), rec_size: 0, - interest: EventSet::hup(), + interest: EventSet::hup() | EventSet::readable(), } } @@ -132,20 +134,19 @@ impl Connection { } /// Register this connection with the IO event loop. - pub fn register(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection register; token={:?}", self.token); - self.interest.insert(EventSet::readable()); - event_loop.register(&self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to register {:?}, {:?}", self.token, e); + pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { + trace!(target: "net", "connection register; token={:?}", reg); + event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { + error!("Failed to register {:?}, {:?}", reg, e); Err(e) }) } /// Update connection registration. Should be called at the end of the IO handler. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection reregister; token={:?}", self.token); - event_loop.reregister( &self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to reregister {:?}, {:?}", self.token, e); + pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { + trace!(target: "net", "connection reregister; token={:?}", reg); + event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { + error!("Failed to reregister {:?}, {:?}", reg, e); Err(e) }) } @@ -182,8 +183,6 @@ pub struct EncryptedConnection { ingress_mac: Keccak, /// Read state read_state: EncryptedConnectionState, - /// Disconnect timeout - idle_timeout: Option, /// Protocol id for the last received packet protocol_id: u16, /// Payload expected to be received for the last header. @@ -192,7 +191,7 @@ pub struct EncryptedConnection { impl EncryptedConnection { /// Create an encrypted connection out of the handshake. Consumes a handshake object. - pub fn new(handshake: Handshake) -> Result { + pub fn new(mut handshake: Handshake) -> Result { let shared = try!(crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_public)); let mut nonce_material = H512::new(); if handshake.originated { @@ -227,6 +226,7 @@ impl EncryptedConnection { ingress_mac.update(&mac_material); ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher }); + handshake.connection.expect(ENCRYPTED_HEADER_LEN); Ok(EncryptedConnection { connection: handshake.connection, encoder: encoder, @@ -235,7 +235,6 @@ impl EncryptedConnection { egress_mac: egress_mac, ingress_mac: ingress_mac, read_state: EncryptedConnectionState::Header, - idle_timeout: None, protocol_id: 0, payload_len: 0 }) @@ -337,13 +336,14 @@ impl EncryptedConnection { } /// Readable IO handler. Tracker receive status and returns decoded packet if avaialable. 
- pub fn readable(&mut self, event_loop: &mut EventLoop) -> Result, UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn readable(&mut self, io: &IoContext) -> Result, UtilError> where Message: Send + Clone{ + io.clear_timer(self.connection.token).unwrap(); match self.read_state { EncryptedConnectionState::Header => { match try!(self.connection.readable()) { Some(data) => { try!(self.read_header(&data)); + try!(io.register_timer(self.connection.token, RECIEVE_PAYLOAD_TIMEOUT)); }, None => {} }; @@ -363,24 +363,15 @@ impl EncryptedConnection { } /// Writable IO handler. Processes send queeue. - pub fn writable(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn writable(&mut self, io: &IoContext) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); try!(self.connection.writable()); Ok(()) } - /// Register this connection with the event handler. - pub fn register>(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.connection.expect(ENCRYPTED_HEADER_LEN); - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); - self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok(); - try!(self.connection.reregister(event_loop)); - Ok(()) - } - /// Update connection registration. This should be called at the end of the event loop. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - try!(self.connection.reregister(event_loop)); + pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.update_socket(reg, event_loop)); Ok(()) } } diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index ca95808b4..ea04a5462 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -10,6 +10,7 @@ use network::host::{HostInfo}; use network::node::NodeId; use error::*; use network::error::NetworkError; +use io::{IoContext, StreamToken}; #[derive(PartialEq, Eq, Debug)] enum HandshakeState { @@ -33,8 +34,6 @@ pub struct Handshake { state: HandshakeState, /// Outgoing or incoming connection pub originated: bool, - /// Disconnect timeout - idle_timeout: Option, /// ECDH ephemeral pub ecdhe: KeyPair, /// Connection nonce @@ -51,16 +50,16 @@ pub struct Handshake { const AUTH_PACKET_SIZE: usize = 307; const ACK_PACKET_SIZE: usize = 210; +const HANDSHAKE_TIMEOUT: u64 = 30000; impl Handshake { /// Create a new handshake object - pub fn new(token: Token, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result { + pub fn new(token: StreamToken, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result { Ok(Handshake { id: id.clone(), connection: Connection::new(token, socket), originated: false, state: HandshakeState::New, - idle_timeout: None, ecdhe: try!(KeyPair::create()), nonce: nonce.clone(), remote_public: Public::new(), @@ -71,8 +70,9 @@ impl Handshake { } /// Start a handhsake - pub fn start(&mut self, host: &HostInfo, originated: bool) -> Result<(), UtilError> { + pub fn start(&mut self, io: &IoContext, host: &HostInfo, originated: bool) -> Result<(), UtilError> where Message: Send + Clone{ self.originated = originated; + io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT).ok(); if originated { try!(self.write_auth(host)); } @@ -89,8 +89,8 @@ impl Handshake { } /// Readable IO handler. Drives the state change. 
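// Editor's note: a toy, self-contained sketch (simplified types, not the real
// IoContext API) of the timeout scheme used above: a connection's stream token
// doubles as its timer token, a deadline such as HANDSHAKE_TIMEOUT or the
// payload timeout is armed while waiting for data, and the timer is cleared
// whenever the socket becomes readable or writable.
const RECEIVE_PAYLOAD_TIMEOUT: u64 = 30_000;

struct Ctx; // stand-in for IoContext<Message>
impl Ctx {
    fn register_timer(&self, token: usize, ms: u64) { println!("arm timer {} for {} ms", token, ms); }
    fn clear_timer(&self, token: usize) { println!("clear timer {}", token); }
}

struct Conn { token: usize }
impl Conn {
    fn on_readable(&self, io: &Ctx, expecting_payload: bool) {
        io.clear_timer(self.token); // data arrived, drop the previous deadline
        if expecting_payload {
            io.register_timer(self.token, RECEIVE_PAYLOAD_TIMEOUT); // wait for the rest
        }
    }
}

fn main() {
    let (conn, io) = (Conn { token: 42 }, Ctx);
    conn.on_readable(&io, true);
    conn.on_readable(&io, false);
}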
- pub fn readable(&mut self, event_loop: &mut EventLoop, host: &HostInfo) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); match self.state { HandshakeState::ReadingAuth => { match try!(self.connection.readable()) { @@ -110,29 +110,33 @@ impl Handshake { None => {} }; }, + HandshakeState::StartSession => {}, _ => { panic!("Unexpected state"); } } if self.state != HandshakeState::StartSession { - try!(self.connection.reregister(event_loop)); + try!(io.update_registration(self.connection.token)); } Ok(()) } /// Writabe IO handler. - pub fn writable(&mut self, event_loop: &mut EventLoop, _host: &HostInfo) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn writable(&mut self, io: &IoContext, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); try!(self.connection.writable()); if self.state != HandshakeState::StartSession { - try!(self.connection.reregister(event_loop)); + io.update_registration(self.connection.token).unwrap(); } Ok(()) } - /// Register the IO handler with the event loop - pub fn register>(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); - self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok(); - try!(self.connection.register(event_loop)); + /// Register the socket with the event loop + pub fn register_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.register_socket(reg, event_loop)); + Ok(()) + } + + pub fn update_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.update_socket(reg, event_loop)); Ok(()) } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 37b58f1f0..f83cb1908 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -1,8 +1,9 @@ -use std::mem; use std::net::{SocketAddr}; use std::collections::{HashMap}; use std::hash::{Hasher}; use std::str::{FromStr}; +use std::sync::*; +use std::ops::*; use mio::*; use mio::tcp::*; use mio::udp::*; @@ -64,14 +65,20 @@ pub type PacketId = u8; pub type ProtocolId = &'static str; /// Messages used to communitate with the event loop from other threads. -pub enum NetworkIoMessage where Message: Send { +#[derive(Clone)] +pub enum NetworkIoMessage where Message: Send + Sync + Clone { /// Register a new protocol handler. AddHandler { - handler: Option+Send>>, + handler: Arc + Sync>, protocol: ProtocolId, versions: Vec, }, - /// Send data over the network. + AddTimer { + protocol: ProtocolId, + token: TimerToken, + delay: u64, + }, + /// Send data over the network. // TODO: remove this Send { peer: PeerId, packet_id: PacketId, @@ -104,46 +111,45 @@ impl Encodable for CapabilityInfo { } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. 
-pub struct NetworkContext<'s, 'io, Message> where Message: Send + 'static, 'io: 's { - io: &'s mut IoContext<'io, NetworkIoMessage>, +pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, 's { + io: &'s IoContext>, protocol: ProtocolId, - connections: &'s mut Slab, - timers: &'s mut HashMap, + connections: Arc>>, session: Option, } -impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 'static, { +impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, { /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler. - fn new(io: &'s mut IoContext<'io, NetworkIoMessage>, + fn new(io: &'s IoContext>, protocol: ProtocolId, - session: Option, connections: &'s mut Slab, - timers: &'s mut HashMap) -> NetworkContext<'s, 'io, Message> { + session: Option, connections: Arc>>) -> NetworkContext<'s, Message> { NetworkContext { io: io, protocol: protocol, session: session, connections: connections, - timers: timers, } } /// Send a packet over the network to another peer. - pub fn send(&mut self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { - match self.connections.get_mut(peer) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { - warn!(target: "net", "Send: Peer does not exist"); + pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { + if let Some(connection) = self.connections.read().unwrap().get(peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Session(ref mut s) => { + s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| { + warn!(target: "net", "Send error: {:?}", e); + }); //TODO: don't copy vector data + }, + _ => warn!(target: "net", "Send: Peer is not connected yet") } + } else { + warn!(target: "net", "Send: Peer does not exist") } Ok(()) } /// Respond to a current network message. Panics if no there is no packet in the context. - pub fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { + pub fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { match self.session { Some(session) => self.send(session, packet_id, data), None => { @@ -153,31 +159,31 @@ impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 's } /// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected. - pub fn disable_peer(&mut self, _peer: PeerId) { + pub fn disable_peer(&self, _peer: PeerId) { //TODO: remove capability, disconnect if no capabilities left } - /// Register a new IO timer. Returns a new timer token. 'NetworkProtocolHandler::timeout' will be called with the token. - pub fn register_timer(&mut self, ms: u64) -> Result{ - match self.io.register_timer(ms) { - Ok(token) => { - self.timers.insert(token, self.protocol); - Ok(token) - }, - e => e, - } + /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. 
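// Editor's note: a minimal, self-contained sketch (toy message type and channel,
// not the real NetworkIoMessage) of the design shown above: NetworkContext no
// longer touches the event loop directly; register_timer just posts an AddTimer
// request carrying the protocol id, and the Host later maps the protocol-local
// token onto a global event-loop token.
use std::sync::mpsc;

#[derive(Debug)]
enum IoRequest {
    AddTimer { protocol: &'static str, token: usize, delay_ms: u64 },
}

struct NetworkCtx { protocol: &'static str, channel: mpsc::Sender<IoRequest> }

impl NetworkCtx {
    fn register_timer(&self, token: usize, delay_ms: u64) {
        // Defer the actual registration to the IO thread.
        self.channel
            .send(IoRequest::AddTimer { protocol: self.protocol, token: token, delay_ms: delay_ms })
            .expect("IO thread gone");
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let ctx = NetworkCtx { protocol: "eth", channel: tx };
    ctx.register_timer(0, 1000);
    println!("{:?}", rx.recv().unwrap()); // the Host side would handle this request
}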
+ pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> { + self.io.message(NetworkIoMessage::AddTimer { + token: token, + delay: ms, + protocol: self.protocol, + }); + Ok(()) } /// Returns peer identification string pub fn peer_info(&self, peer: PeerId) -> String { - match self.connections.get(peer) { - Some(&ConnectionEntry::Session(ref s)) => { - s.info.client_version.clone() - }, - _ => { - "unknown".to_string() + if let Some(connection) = self.connections.read().unwrap().get(peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Session(ref s) => { + return s.info.client_version.clone() + }, + _ => {} } } + "unknown".to_string() } } @@ -222,26 +228,35 @@ enum ConnectionEntry { Session(Session) } -/// Root IO handler. Manages protocol handlers, IO timers and network connections. -pub struct Host where Message: Send { - pub info: HostInfo, - udp_socket: UdpSocket, - listener: TcpListener, - connections: Slab, - timers: HashMap, - nodes: HashMap, - handlers: HashMap>>, +type SharedConnectionEntry = Arc>; + +#[derive(Copy, Clone)] +struct ProtocolTimer { + pub protocol: ProtocolId, + pub token: TimerToken, // Handler level token } -impl Host where Message: Send { +/// Root IO handler. Manages protocol handlers, IO timers and network connections. +pub struct Host where Message: Send + Sync + Clone { + pub info: RwLock, + udp_socket: Mutex, + tcp_listener: Mutex, + connections: Arc>>, + nodes: RwLock>, + handlers: RwLock>>>, + timers: RwLock>, + timer_counter: RwLock, +} + +impl Host where Message: Send + Sync + Clone { pub fn new() -> Host { let config = NetworkConfiguration::new(); let addr = config.listen_address; // Setup the server socket - let listener = TcpListener::bind(&addr).unwrap(); + let tcp_listener = TcpListener::bind(&addr).unwrap(); let udp_socket = UdpSocket::bound(&addr).unwrap(); - Host:: { - info: HostInfo { + let mut host = Host:: { + info: RwLock::new(HostInfo { keys: KeyPair::create().unwrap(), config: config, nonce: H256::random(), @@ -249,39 +264,64 @@ impl Host where Message: Send { client_version: "parity".to_string(), listen_port: 0, capabilities: Vec::new(), - }, - udp_socket: udp_socket, - listener: listener, - connections: Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS), - timers: HashMap::new(), - nodes: HashMap::new(), - handlers: HashMap::new(), - } + }), + udp_socket: Mutex::new(udp_socket), + tcp_listener: Mutex::new(tcp_listener), + connections: Arc::new(RwLock::new(Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS))), + nodes: RwLock::new(HashMap::new()), + handlers: RwLock::new(HashMap::new()), + timers: RwLock::new(HashMap::new()), + timer_counter: RwLock::new(LAST_CONNECTION + 1), + }; + let port = host.info.read().unwrap().config.listen_address.port(); + host.info.write().unwrap().deref_mut().listen_port = port; + + /* + match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() { + Some(iface) => config.public_address = iface.addr.unwrap(), + None => warn!("No public network interface"), + */ + + // self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); + // GO bootnodes + host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE + 
host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR + host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG + // ETH/DEV cpp-ethereum (poc-9.ethdev.com) + host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"); + host } - fn add_node(&mut self, id: &str) { + pub fn add_node(&mut self, id: &str) { match Node::from_str(id) { Err(e) => { warn!("Could not add node: {:?}", e); }, Ok(n) => { - self.nodes.insert(n.id.clone(), n); + self.nodes.write().unwrap().insert(n.id.clone(), n); } } } - fn maintain_network(&mut self, io: &mut IoContext>) { + pub fn client_version(&self) -> String { + self.info.read().unwrap().client_version.clone() + } + + pub fn client_id(&self) -> NodeId { + self.info.read().unwrap().id().clone() + } + + fn maintain_network(&self, io: &IoContext>) { self.connect_peers(io); - io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap(); } fn have_session(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) + self.connections.read().unwrap().iter().any(|e| match e.lock().unwrap().deref() { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) } fn connecting_to(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) + self.connections.read().unwrap().iter().any(|e| match e.lock().unwrap().deref() { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) } - fn connect_peers(&mut self, io: &mut IoContext>) { + fn connect_peers(&self, io: &IoContext>) { struct NodeInfo { id: NodeId, peer_type: PeerType @@ -292,13 +332,14 @@ impl Host where Message: Send { let mut req_conn = 0; //TODO: use nodes from discovery here //for n in self.node_buckets.iter().flat_map(|n| &n.nodes).map(|id| NodeInfo { id: id.clone(), peer_type: self.nodes.get(id).unwrap().peer_type}) { - for n in self.nodes.values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) { + let pin = self.info.read().unwrap().deref().config.pin; + for n in self.nodes.read().unwrap().values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) { let connected = self.have_session(&n.id) || self.connecting_to(&n.id); let required = n.peer_type == PeerType::Required; if connected && required { req_conn += 1; } - else if !connected && (!self.info.config.pin || required) { + else if !connected && (!pin || required) { to_connect.push(n); } } @@ -312,8 +353,7 @@ impl Host where Message: Send { } } - if !self.info.config.pin - { + if !pin { let pending_count = 0; //TODO: let peer_count = 0; let mut open_slots = IDEAL_PEERS - peer_count - pending_count + req_conn; @@ -328,23 +368,24 @@ impl Host where Message: Send { } } - fn connect_peer(&mut self, id: &NodeId, io: &mut IoContext>) { - if self.have_session(id) - { + fn connect_peer(&self, id: &NodeId, io: &IoContext>) { + if self.have_session(id) { warn!("Aborted connect. Node already connected."); return; } - if self.connecting_to(id) - { + if self.connecting_to(id) { warn!("Aborted connect. 
Node already connecting."); return; } let socket = { - let node = self.nodes.get_mut(id).unwrap(); - node.last_attempted = Some(::time::now()); - - match TcpStream::connect(&node.endpoint.address) { + let address = { + let mut nodes = self.nodes.write().unwrap(); + let node = nodes.get_mut(id).unwrap(); + node.last_attempted = Some(::time::now()); + node.endpoint.address + }; + match TcpStream::connect(&address) { Ok(socket) => socket, Err(_) => { warn!("Cannot connect to node"); @@ -353,224 +394,186 @@ impl Host where Message: Send { } }; - let nonce = self.info.next_nonce(); - match self.connections.insert_with(|token| ConnectionEntry::Handshake(Handshake::new(Token(token), id, socket, &nonce).expect("Can't create handshake"))) { - Some(token) => { - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.start(&self.info, true) - .and_then(|_| h.register(io.event_loop)) - .unwrap_or_else (|e| { - debug!(target: "net", "Handshake create error: {:?}", e); - }); - }, - _ => {} - } - }, - None => { warn!("Max connections reached") } + let nonce = self.info.write().unwrap().next_nonce(); + if self.connections.write().unwrap().insert_with(|token| { + let mut handshake = Handshake::new(token, id, socket, &nonce).expect("Can't create handshake"); + handshake.start(io, &self.info.read().unwrap(), true).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| { + debug!(target: "net", "Handshake create error: {:?}", e); + }); + Arc::new(Mutex::new(ConnectionEntry::Handshake(handshake))) + }).is_none() { + warn!("Max connections reached"); } } - - fn accept(&mut self, _io: &mut IoContext>) { + fn accept(&self, _io: &IoContext>) { trace!(target: "net", "accept"); } - fn connection_writable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { - let mut kill = false; + fn connection_writable(&self, token: StreamToken, io: &IoContext>) { let mut create_session = false; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.writable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Handshake write error: {:?}", e); - kill = true; - }); - create_session = h.done(); - }, - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.writable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Session write error: {:?}", e); - kill = true; - }); + let mut kill = false; + if let Some(connection) = self.connections.read().unwrap().get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Handshake(ref mut h) => { + match h.writable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake write error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + if h.done() { + create_session = true; + } + }, + &mut ConnectionEntry::Session(ref mut s) => { + match s.writable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Session write error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); + } } - _ => { - warn!(target: "net", "Received event for unknown connection"); - } - } + } else { warn!(target: "net", "Received event for unknown connection") } if kill { - self.kill_connection(token, io); + self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; } else if create_session { self.start_session(token, io); 
- } - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); - }, - _ => (), + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); } } - fn connection_closed<'s>(&'s mut self, token: TimerToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn connection_closed(&self, token: TimerToken, io: &IoContext>) { self.kill_connection(token, io); } - fn connection_readable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { - let mut kill = false; - let mut create_session = false; + fn connection_readable(&self, token: StreamToken, io: &IoContext>) { let mut ready_data: Vec = Vec::new(); let mut packet_data: Option<(ProtocolId, PacketId, Vec)> = None; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.readable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Handshake read error: {:?}", e); - kill = true; - }); - create_session = h.done(); - }, - Some(&mut ConnectionEntry::Session(ref mut s)) => { - let sd = { s.readable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Session read error: {:?}", e); - kill = true; - SessionData::None - }) }; - match sd { - SessionData::Ready => { - for (p, _) in self.handlers.iter_mut() { - if s.have_capability(p) { - ready_data.push(p); + let mut create_session = false; + let mut kill = false; + if let Some(connection) = self.connections.read().unwrap().get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Handshake(ref mut h) => { + match h.readable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake read error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + if h.done() { + create_session = true; + } + }, + &mut ConnectionEntry::Session(ref mut s) => { + match s.readable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake read error: {:?}", e); + kill = true; + }, + Ok(SessionData::Ready) => { + for (p, _) in self.handlers.read().unwrap().iter() { + if s.have_capability(p) { + ready_data.push(p); + } } - } - }, - SessionData::Packet { - data, - protocol, - packet_id, - } => { - match self.handlers.get_mut(protocol) { - None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, - Some(_) => packet_data = Some((protocol, packet_id, data)), - } - }, - SessionData::None => {}, - } - } - _ => { - warn!(target: "net", "Received event for unknown connection"); - } - } - if kill { - self.kill_connection(token, io); - return; - } - if create_session { - self.start_session(token, io); - } - for p in ready_data { - let mut h = self.handlers.get_mut(p).unwrap(); - h.connected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token); - } - if let Some((p, packet_id, data)) = packet_data { - let mut h = self.handlers.get_mut(p).unwrap(); - h.read(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token, packet_id, &data[1..]); - } - - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); - }, - _ => (), - } - } - - fn start_session(&mut self, token: StreamToken, io: &mut IoContext>) 
{ - let info = &self.info; - // TODO: use slab::replace_with (currently broken) - /* - match self.connections.remove(token) { - Some(ConnectionEntry::Handshake(h)) => { - match Session::new(h, io.event_loop, info) { - Ok(session) => { - assert!(token == self.connections.insert(ConnectionEntry::Session(session)).ok().unwrap()); - }, - Err(e) => { - debug!(target: "net", "Session construction error: {:?}", e); + }, + Ok(SessionData::Packet { + data, + protocol, + packet_id, + }) => { + match self.handlers.read().unwrap().get(protocol) { + None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, + Some(_) => packet_data = Some((protocol, packet_id, data)), + } + }, + Ok(SessionData::None) => {}, } } - }, - _ => panic!("Error updating slab with session") - }*/ - self.connections.replace_with(token, |c| { - match c { - ConnectionEntry::Handshake(h) => Session::new(h, io.event_loop, info) - .map(|s| Some(ConnectionEntry::Session(s))) - .unwrap_or_else(|e| { - debug!(target: "net", "Session construction error: {:?}", e); - None - }), - _ => { panic!("No handshake to create a session from"); } } - }).expect("Error updating slab with session"); + } else { + warn!(target: "net", "Received event for unknown connection"); + } + if kill { + self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection + return; + } else if create_session { + self.start_session(token, io); + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); + } + for p in ready_data { + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.connected(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token); + } + if let Some((p, packet_id, data)) = packet_data { + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.read(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token, packet_id, &data[1..]); + } + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Token registration error: {:?}", e)); } - fn connection_timeout<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn start_session(&self, token: StreamToken, io: &IoContext>) { + self.connections.write().unwrap().replace_with(token, |c| { + match Arc::try_unwrap(c).ok().unwrap().into_inner().unwrap() { + ConnectionEntry::Handshake(h) => { + let session = Session::new(h, io, &self.info.read().unwrap()).expect("Session creation error"); + io.update_registration(token).expect("Error updating session registration"); + Some(Arc::new(Mutex::new(ConnectionEntry::Session(session)))) + }, + _ => { None } // handshake expired + } + }).ok(); + } + + fn connection_timeout(&self, token: StreamToken, io: &IoContext>) { self.kill_connection(token, io) } - fn kill_connection<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn kill_connection(&self, token: StreamToken, io: &IoContext>) { let mut to_disconnect: Vec = Vec::new(); - let mut remove = true; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(_)) => (), // just abandon handshake - Some(&mut ConnectionEntry::Session(ref mut s)) if s.is_ready() => { - for (p, _) in self.handlers.iter_mut() { - if s.have_capability(p) { - to_disconnect.push(p); - } + { + let mut connections = self.connections.write().unwrap(); + if let Some(connection) = connections.get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut 
ConnectionEntry::Handshake(_) => { + connections.remove(token); + }, + &mut ConnectionEntry::Session(ref mut s) if s.is_ready() => { + for (p, _) in self.handlers.read().unwrap().iter() { + if s.have_capability(p) { + to_disconnect.push(p); + } + } + connections.remove(token); + }, + _ => {}, } - }, - _ => { - remove = false; - }, + } } for p in to_disconnect { - let mut h = self.handlers.get_mut(p).unwrap(); - h.disconnected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token); - } - if remove { - self.connections.remove(token); + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.disconnected(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token); } } } -impl IoHandler> for Host where Message: Send + 'static { +impl IoHandler> for Host where Message: Send + Sync + Clone + 'static { /// Initialize networking - fn initialize(&mut self, io: &mut IoContext>) { - /* - match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() { - Some(iface) => config.public_address = iface.addr.unwrap(), - None => warn!("No public network interface"), - */ - - // Start listening for incoming connections - io.event_loop.register(&self.listener, Token(TCP_ACCEPT), EventSet::readable(), PollOpt::edge()).unwrap(); - io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap(); - // open the udp socket - io.event_loop.register(&self.udp_socket, Token(NODETABLE_RECEIVE), EventSet::readable(), PollOpt::edge()).unwrap(); - io.event_loop.timeout_ms(Token(NODETABLE_MAINTAIN), 7200).unwrap(); - let port = self.info.config.listen_address.port(); - self.info.listen_port = port; - - self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); -/* // GO bootnodes - self.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE - self.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR - self.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG - // ETH/DEV cpp-ethereum (poc-9.ethdev.com) - self.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");*/ + fn initialize(&self, io: &IoContext>) { + io.register_stream(TCP_ACCEPT).expect("Error registering TCP listener"); + io.register_stream(NODETABLE_RECEIVE).expect("Error registering UDP listener"); + io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer"); + //io.register_timer(NODETABLE_MAINTAIN, 7200); } - fn stream_hup<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_hup(&self, io: &IoContext>, stream: StreamToken) { trace!(target: "net", "Hup: {}", stream); match stream { FIRST_CONNECTION ... 
LAST_CONNECTION => self.connection_closed(stream, io), @@ -578,7 +581,7 @@ impl IoHandler> for Host where Messa }; } - fn stream_readable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_readable(&self, io: &IoContext>, stream: StreamToken) { match stream { FIRST_CONNECTION ... LAST_CONNECTION => self.connection_readable(stream, io), NODETABLE_RECEIVE => {}, @@ -587,65 +590,114 @@ impl IoHandler> for Host where Messa } } - fn stream_writable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_writable(&self, io: &IoContext>, stream: StreamToken) { match stream { FIRST_CONNECTION ... LAST_CONNECTION => self.connection_writable(stream, io), + NODETABLE_RECEIVE => {}, _ => panic!("Received unknown writable token"), } } - fn timeout<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, token: TimerToken) { + fn timeout(&self, io: &IoContext>, token: TimerToken) { match token { IDLE => self.maintain_network(io), FIRST_CONNECTION ... LAST_CONNECTION => self.connection_timeout(token, io), NODETABLE_DISCOVERY => {}, NODETABLE_MAINTAIN => {}, - _ => match self.timers.get_mut(&token).map(|p| *p) { - Some(protocol) => match self.handlers.get_mut(protocol) { - None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, - Some(h) => { h.timeout(&mut NetworkContext::new(io, protocol, Some(token), &mut self.connections, &mut self.timers), token); } - }, - None => {} // time not registerd through us + _ => match self.timers.read().unwrap().get(&token).map(|p| *p) { + Some(timer) => match self.handlers.read().unwrap().get(timer.protocol).map(|h| h.clone()) { + None => { warn!(target: "net", "No handler found for protocol: {:?}", timer.protocol) }, + Some(h) => { h.timeout(&NetworkContext::new(io, timer.protocol, None, self.connections.clone()), timer.token); } + }, + None => { warn!("Unknown timer token: {}", token); } // timer is not registerd through us } } } - fn message<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, message: &'s mut NetworkIoMessage) { + fn message(&self, io: &IoContext>, message: &NetworkIoMessage) { match message { - &mut NetworkIoMessage::AddHandler { - ref mut handler, + &NetworkIoMessage::AddHandler { + ref handler, ref protocol, ref versions } => { - let mut h = mem::replace(handler, None).unwrap(); - h.initialize(&mut NetworkContext::new(io, protocol, None, &mut self.connections, &mut self.timers)); - self.handlers.insert(protocol, h); + let h = handler.clone(); + h.initialize(&NetworkContext::new(io, protocol, None, self.connections.clone())); + self.handlers.write().unwrap().insert(protocol, h); + let mut info = self.info.write().unwrap(); for v in versions { - self.info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); + info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); } }, - &mut NetworkIoMessage::Send { + &NetworkIoMessage::AddTimer { + ref protocol, + ref delay, + ref token, + } => { + let handler_token = { + let mut timer_counter = self.timer_counter.write().unwrap(); + let counter = timer_counter.deref_mut(); + let handler_token = *counter; + *counter += 1; + handler_token + }; + self.timers.write().unwrap().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token }); + io.register_timer(handler_token, *delay).expect("Error registering timer"); + }, + &NetworkIoMessage::Send { ref peer, ref packet_id, ref protocol, ref data, } => { - match 
self.connections.get_mut(*peer as usize) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { - warn!(target: "net", "Send: Peer does not exist"); + if let Some(connection) = self.connections.read().unwrap().get(*peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Session(ref mut s) => { + s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { + warn!(target: "net", "Send error: {:?}", e); + }); //TODO: don't copy vector data + }, + _ => { warn!(target: "net", "Send: Peer session not exist"); } } - } + } else { warn!(target: "net", "Send: Peer does not exist"); } }, - &mut NetworkIoMessage::User(ref message) => { - for (p, h) in self.handlers.iter_mut() { - h.message(&mut NetworkContext::new(io, p, None, &mut self.connections, &mut self.timers), &message); + &NetworkIoMessage::User(ref message) => { + for (p, h) in self.handlers.read().unwrap().iter() { + h.message(&mut NetworkContext::new(io, p, None, self.connections.clone()), &message); } } } } + + fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>>) { + match stream { + FIRST_CONNECTION ... LAST_CONNECTION => { + if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Handshake(ref h) => h.register_socket(reg, event_loop).expect("Error registering socket"), + _ => warn!("Unexpected session stream registration") + } + } else { warn!("Unexpected stream registration")} + } + NODETABLE_RECEIVE => event_loop.register(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), + TCP_ACCEPT => event_loop.register(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), + _ => warn!("Unexpected stream regitration") + } + } + + fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>>) { + match stream { + FIRST_CONNECTION ... 
LAST_CONNECTION => { + if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Handshake(ref h) => h.update_socket(reg, event_loop).expect("Error updating socket"), + &ConnectionEntry::Session(ref s) => s.update_socket(reg, event_loop).expect("Error updating socket"), + } + } else { warn!("Unexpected stream update")} + } + NODETABLE_RECEIVE => event_loop.reregister(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), + TCP_ACCEPT => event_loop.reregister(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), + _ => warn!("Unexpected stream update") + } + } } diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index a47e88927..6facaf704 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -56,31 +56,31 @@ mod service; mod error; mod node; -pub type PeerId = host::PeerId; -pub type PacketId = host::PacketId; -pub type NetworkContext<'s,'io, Message> = host::NetworkContext<'s, 'io, Message>; -pub type NetworkService = service::NetworkService; -pub type NetworkIoMessage = host::NetworkIoMessage; +pub use network::host::PeerId; +pub use network::host::PacketId; +pub use network::host::NetworkContext; +pub use network::service::NetworkService; +pub use network::host::NetworkIoMessage; pub use network::host::NetworkIoMessage::User as UserMessage; -pub type NetworkError = error::NetworkError; +pub use network::error::NetworkError; -use io::*; +use io::TimerToken; /// Network IO protocol handler. This needs to be implemented for each new subprotocol. /// All the handler function are called from within IO event loop. /// `Message` is the type for message data. -pub trait NetworkProtocolHandler: Send where Message: Send { +pub trait NetworkProtocolHandler: Sync + Send where Message: Send + Sync + Clone { /// Initialize the handler - fn initialize(&mut self, _io: &mut NetworkContext) {} + fn initialize(&self, _io: &NetworkContext) {} /// Called when new network packet received. - fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); /// Called when new peer is connected. Only called when peer supports the same protocol. - fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId); + fn connected(&self, io: &NetworkContext, peer: &PeerId); /// Called when a previously connected peer disconnects. - fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId); + fn disconnected(&self, io: &NetworkContext, peer: &PeerId); /// Timer function called after a timeout created with `NetworkContext::timeout`. - fn timeout(&mut self, _io: &mut NetworkContext, _timer: TimerToken) {} + fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {} /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. 
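With the handler methods now taking `&self` and the trait requiring `Sync`, an implementation keeps any mutable state behind interior mutability instead of `&mut self`. A minimal sketch, not taken from the patch, assuming the items re-exported by this module and using `u32` as a stand-in message type:

    use std::sync::RwLock;
    use util::network::{NetworkProtocolHandler, NetworkContext, PeerId};

    struct PacketCounter {
        seen: RwLock<usize>,
    }

    impl NetworkProtocolHandler<u32> for PacketCounter {
        fn read(&self, _io: &NetworkContext<u32>, _peer: &PeerId, _packet_id: u8, _data: &[u8]) {
            // only `&self` is available, so mutation goes through the lock
            *self.seen.write().unwrap() += 1;
        }
        fn connected(&self, _io: &NetworkContext<u32>, _peer: &PeerId) {}
        fn disconnected(&self, _io: &NetworkContext<u32>, _peer: &PeerId) {}
    }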
- fn message(&mut self, _io: &mut NetworkContext, _message: &Message) {} + fn message(&self, _io: &NetworkContext, _message: &Message) {} } diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 4c333b8af..1318737a3 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -1,3 +1,4 @@ +use std::sync::*; use error::*; use network::{NetworkProtocolHandler}; use network::error::{NetworkError}; @@ -6,18 +7,18 @@ use io::*; /// IO Service with networking /// `Message` defines a notification data type. -pub struct NetworkService where Message: Send + 'static { +pub struct NetworkService where Message: Send + Sync + Clone + 'static { io_service: IoService>, host_info: String, } -impl NetworkService where Message: Send + 'static { +impl NetworkService where Message: Send + Sync + Clone + 'static { /// Starts IO event loop pub fn start() -> Result, UtilError> { let mut io_service = try!(IoService::>::start()); - let host = Box::new(Host::new()); - let host_info = host.info.client_version.clone(); - info!("NetworkService::start(): id={:?}", host.info.id()); + let host = Arc::new(Host::new()); + let host_info = host.client_version(); + info!("NetworkService::start(): id={:?}", host.client_id()); try!(io_service.register_handler(host)); Ok(NetworkService { io_service: io_service, @@ -37,9 +38,9 @@ impl NetworkService where Message: Send + 'static { } /// Regiter a new protocol handler with the event loop. - pub fn register_protocol(&mut self, handler: Box+Send>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { + pub fn register_protocol(&mut self, handler: Arc+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { try!(self.io_service.send_message(NetworkIoMessage::AddHandler { - handler: Some(handler), + handler: handler, protocol: protocol, versions: versions.to_vec(), })); @@ -55,7 +56,5 @@ impl NetworkService where Message: Send + 'static { pub fn io(&mut self) -> &mut IoService> { &mut self.io_service } - - } diff --git a/util/src/network/session.rs b/util/src/network/session.rs index 828e4b062..5722ffde4 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -4,6 +4,7 @@ use rlp::*; use network::connection::{EncryptedConnection, Packet}; use network::handshake::Handshake; use error::*; +use io::{IoContext}; use network::error::{NetworkError, DisconnectReason}; use network::host::*; use network::node::NodeId; @@ -84,7 +85,7 @@ const PACKET_LAST: u8 = 0x7f; impl Session { /// Create a new session out of comepleted handshake. Consumes handshake object. - pub fn new>(h: Handshake, event_loop: &mut EventLoop, host: &HostInfo) -> Result { + pub fn new(h: Handshake, _io: &IoContext, host: &HostInfo) -> Result where Message: Send + Sync + Clone { let id = h.id.clone(); let connection = try!(EncryptedConnection::new(h)); let mut session = Session { @@ -99,7 +100,6 @@ impl Session { }; try!(session.write_hello(host)); try!(session.write_ping()); - try!(session.connection.register(event_loop)); Ok(session) } @@ -109,16 +109,16 @@ impl Session { } /// Readable IO handler. Returns packet data if available. 
- pub fn readable(&mut self, event_loop: &mut EventLoop, host: &HostInfo) -> Result { - match try!(self.connection.readable(event_loop)) { + pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result where Message: Send + Sync + Clone { + match try!(self.connection.readable(io)) { Some(data) => Ok(try!(self.read_packet(data, host))), None => Ok(SessionData::None) } } /// Writable IO handler. Sends pending packets. - pub fn writable(&mut self, event_loop: &mut EventLoop, _host: &HostInfo) -> Result<(), UtilError> { - self.connection.writable(event_loop) + pub fn writable(&mut self, io: &IoContext, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Sync + Clone { + self.connection.writable(io) } /// Checks if peer supports given capability @@ -127,8 +127,8 @@ impl Session { } /// Update registration with the event loop. Should be called at the end of the IO handler. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.connection.reregister(event_loop) + pub fn update_socket(&self, reg:Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + self.connection.update_socket(reg, event_loop) } /// Send a protocol packet to peer. From b07cbcd8cdd47e140778e01afb3bbef4fd4a2261 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Thu, 21 Jan 2016 19:32:48 +0100 Subject: [PATCH 023/138] Changing some tests from ignore to heavy --- src/tests/transaction.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/transaction.rs b/src/tests/transaction.rs index 1603ad55a..967738c91 100644 --- a/src/tests/transaction.rs +++ b/src/tests/transaction.rs @@ -65,14 +65,14 @@ declare_test!{TransactionTests/ttTransactionTest} declare_test!{TransactionTests/tt10mbDataField} declare_test!{TransactionTests/ttWrongRLPTransaction} declare_test!{TransactionTests/Homestead/ttTransactionTest} -declare_test!{TransactionTests/Homestead/tt10mbDataField} +declare_test!{heavy => TransactionTests/Homestead/tt10mbDataField} declare_test!{TransactionTests/Homestead/ttWrongRLPTransaction} declare_test!{TransactionTests/RandomTests/tr201506052141PYTHON}*/ declare_test!{TransactionTests_ttTransactionTest, "TransactionTests/ttTransactionTest"} -declare_test!{ignore => TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} +declare_test!{TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} declare_test!{TransactionTests_ttWrongRLPTransaction, "TransactionTests/ttWrongRLPTransaction"} declare_test!{TransactionTests_Homestead_ttTransactionTest, "TransactionTests/Homestead/ttTransactionTest"} -declare_test!{ignore => TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} +declare_test!{heavy => TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} declare_test!{TransactionTests_Homestead_ttWrongRLPTransaction, "TransactionTests/Homestead/ttWrongRLPTransaction"} declare_test!{TransactionTests_RandomTests_tr201506052141PYTHON, "TransactionTests/RandomTests/tr201506052141PYTHON"} From 4bf1c205b41b4471b018c511f3b093b43ea14476 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 23:33:52 +0100 Subject: [PATCH 024/138] DB commit queue --- src/bin/client/main.rs | 8 +-- src/{queue.rs => block_queue.rs} | 4 +- src/client.rs | 95 ++++++++++++++++++++++---------- src/lib.rs | 3 +- src/service.rs | 26 ++++++--- src/sync/io.rs | 10 ++-- src/sync/mod.rs | 33 ++++------- util/src/io/service.rs | 5 ++ util/src/journaldb.rs | 10 ++++ 
util/src/network/host.rs | 8 +-- 10 files changed, 127 insertions(+), 75 deletions(-) rename src/{queue.rs => block_queue.rs} (98%) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index e49dc2dbc..3ebf4e080 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -10,10 +10,9 @@ use log::{LogLevelFilter}; use env_logger::LogBuilder; use util::*; use ethcore::client::*; -use ethcore::service::ClientService; +use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethcore::blockchain::CacheSize; -use ethcore::sync::*; fn setup_log() { let mut builder = LogBuilder::new(); @@ -90,7 +89,7 @@ impl Informant { const INFO_TIMER: TimerToken = 0; struct ClientIoHandler { - client: Arc>, + client: Arc, info: Informant, } @@ -101,8 +100,7 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if INFO_TIMER == timer { - let client = self.client.read().unwrap(); - self.info.tick(client.deref()); + self.info.tick(&self.client); } } } diff --git a/src/queue.rs b/src/block_queue.rs similarity index 98% rename from src/queue.rs rename to src/block_queue.rs index 7c74b56d7..0bb184a1b 100644 --- a/src/queue.rs +++ b/src/block_queue.rs @@ -1,12 +1,14 @@ +//! A queue of blocks. Sits between network or other I/O and the BlockChain. +//! Sorts them ready for blockchain insertion. use std::thread::{JoinHandle, self}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use util::*; use verification::*; use error::*; use engine::Engine; -use sync::*; use views::*; use header::*; +use service::*; /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. diff --git a/src/client.rs b/src/client.rs index 226a022ca..cf8b0fd7c 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,8 +6,9 @@ use error::*; use header::BlockNumber; use spec::Spec; use engine::Engine; -use queue::BlockQueue; -use sync::NetSyncMessage; +use block_queue::BlockQueue; +use db_queue::{DbQueue, StateDBCommit}; +use service::NetSyncMessage; use env_info::LastHashes; use verification::*; use block::*; @@ -95,13 +96,13 @@ pub trait BlockChainClient : Sync + Send { fn block_receipts(&self, hash: &H256) -> Option; /// Import a block into the blockchain. - fn import_block(&mut self, bytes: Bytes) -> ImportResult; + fn import_block(&self, bytes: Bytes) -> ImportResult; /// Get block queue information. fn queue_status(&self) -> BlockQueueStatus; /// Clear block queue and abort all import activity. - fn clear_queue(&mut self); + fn clear_queue(&self); /// Get blockchain information. fn chain_info(&self) -> BlockChainInfo; @@ -132,19 +133,24 @@ pub struct Client { chain: Arc>, engine: Arc>, state_db: JournalDB, - queue: BlockQueue, - report: ClientReport, + block_queue: RwLock, + db_queue: RwLock, + report: RwLock, + uncommited_states: RwLock>, + import_lock: Mutex<()> } const HISTORY: u64 = 1000; impl Client { /// Create a new client with given spec and DB path. 
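The constructor that follows now hands back an `Arc<Client>` and reuses one RocksDB handle for both the genesis-priming `JournalDB` and the long-lived state DB. The sharing pattern in isolation might look like this (a sketch, assuming the `rocksdb` and `JournalDB` imports that client.rs already uses and the `new_with_arc` constructor added later in this patch):

    fn open_state_db(path: &str) -> JournalDB {
        use std::sync::Arc;
        let mut opts = Options::new();
        opts.create_if_missing(true);
        let db = Arc::new(DB::open(&opts, path).unwrap());
        {
            // short-lived overlay, used only to seed the genesis state;
            // ensure_db_good and the block 0 commit happen here in client.rs
            let genesis_db = JournalDB::new_with_arc(db.clone());
            drop(genesis_db);
        }
        // the copy kept by the client shares the same backing `DB`
        JournalDB::new_with_arc(db)
    }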
- pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { + pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result, Error> { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); + opts.set_disable_data_sync(true); + opts.set_disable_auto_compactions(true); /*opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); @@ -164,37 +170,46 @@ impl Client { let mut state_path = path.to_path_buf(); state_path.push("state"); - let db = DB::open(&opts, state_path.to_str().unwrap()).unwrap(); - let mut state_db = JournalDB::new(db); + let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap()); let engine = Arc::new(try!(spec.to_engine())); - if engine.spec().ensure_db_good(&mut state_db) { - state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + { + let mut state_db = JournalDB::new_with_arc(db.clone()); + if engine.spec().ensure_db_good(&mut state_db) { + state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + } } + let state_db = JournalDB::new_with_arc(db); -// chain.write().unwrap().ensure_good(&state_db); - - Ok(Client { + let client = Arc::new(Client { chain: chain, engine: engine.clone(), state_db: state_db, - queue: BlockQueue::new(engine, message_channel), - report: Default::default(), - }) + block_queue: RwLock::new(BlockQueue::new(engine, message_channel)), + db_queue: RwLock::new(DbQueue::new()), + report: RwLock::new(Default::default()), + uncommited_states: RwLock::new(HashMap::new()), + import_lock: Mutex::new(()), + }); + + let weak = Arc::downgrade(&client); + client.db_queue.read().unwrap().start(weak); + Ok(client) } /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&mut self) { + pub fn import_verified_blocks(&self, _io: &IoChannel) { let mut bad = HashSet::new(); - let blocks = self.queue.drain(128); + let _import_lock = self.import_lock.lock(); + let blocks = self.block_queue.write().unwrap().drain(128); if blocks.is_empty() { return; } for block in blocks { if bad.contains(&block.header.parent_hash) { - self.queue.mark_as_bad(&block.header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); continue; } @@ -202,7 +217,7 @@ impl Client { let header = &block.header; if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); return; }; @@ -210,7 +225,7 @@ impl Client { Some(p) => p, None => { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); return; }, @@ -228,18 +243,23 @@ impl Client { } } - let result = match enact_verified(&block, self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes) { + let db = match 
self.uncommited_states.read().unwrap().get(&header.parent_hash) { + Some(db) => db.clone(), + None => self.state_db.clone(), + }; + + let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); bad.insert(block.header.hash()); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; } @@ -252,11 +272,25 @@ impl Client { return; } } - self.report.accrue_block(&block); + /* + let db = result.drain(); + self.uncommited_states.write().unwrap().insert(header.hash(), db.clone()); + self.db_queue.write().unwrap().queue(StateDBCommit { + now: header.number(), + hash: header.hash().clone(), + end: ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap())), + db: db, + });*/ + self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } + /// Clear cached state overlay + pub fn clear_state(&self, hash: &H256) { + self.uncommited_states.write().unwrap().remove(hash); + } + /// Get info on the cache. pub fn cache_info(&self) -> CacheSize { self.chain.read().unwrap().cache_size() @@ -264,7 +298,7 @@ impl Client { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.clone() + self.report.read().unwrap().clone() } /// Tick the client. @@ -327,12 +361,12 @@ impl BlockChainClient for Client { unimplemented!(); } - fn import_block(&mut self, bytes: Bytes) -> ImportResult { + fn import_block(&self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); if self.chain.read().unwrap().is_known(&header.hash()) { return Err(ImportError::AlreadyInChain); } - self.queue.import_block(bytes) + self.block_queue.write().unwrap().import_block(bytes) } fn queue_status(&self) -> BlockQueueStatus { @@ -341,7 +375,8 @@ impl BlockChainClient for Client { } } - fn clear_queue(&mut self) { + fn clear_queue(&self) { + self.block_queue.write().unwrap().clear(); } fn chain_info(&self) -> BlockChainInfo { diff --git a/src/lib.rs b/src/lib.rs index a5b6c3dae..58d84764a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -150,5 +150,6 @@ pub mod block; /// TODO [arkpar] Please document me pub mod verification; /// TODO [debris] Please document me -pub mod queue; +pub mod db_queue; +pub mod block_queue; pub mod ethereum; diff --git a/src/service.rs b/src/service.rs index b97c1cb69..4034ce841 100644 --- a/src/service.rs +++ b/src/service.rs @@ -5,10 +5,22 @@ use error::*; use std::env; use client::Client; +/// Message type for external and internal events +#[derive(Clone)] +pub enum SyncMessage { + /// New block has been imported into the blockchain + NewChainBlock(Bytes), //TODO: use Cow + /// A block is ready + BlockVerified, +} + +/// TODO [arkpar] Please document me +pub type NetSyncMessage = NetworkIoMessage; + /// Client service setup. Creates and registers client and network services with the IO subsystem. 
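`SyncMessage::BlockVerified` is the notification posted once verification finishes; any code holding the IO channel can raise it, and the client IO handler below reacts by draining the verified queue. A sketch (the free function is illustrative; `UserMessage` is the variant re-export introduced in the network module earlier in this series):

    fn notify_block_ready(channel: &IoChannel<NetSyncMessage>) {
        // wakes the client IO handler, which then calls import_verified_blocks
        channel.send(UserMessage(SyncMessage::BlockVerified)).expect("IO service unavailable");
    }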
pub struct ClientService { net_service: NetworkService, - client: Arc>, + client: Arc, } impl ClientService { @@ -20,7 +32,7 @@ impl ClientService { let mut dir = env::home_dir().unwrap(); dir.push(".parity"); dir.push(H64::from(spec.genesis_header().hash()).hex()); - let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel())))); + let client = try!(Client::new(spec, &dir, net_service.io().channel())); EthSync::register(&mut net_service, client.clone()); let client_io = Arc::new(ClientIoHandler { client: client.clone() @@ -39,14 +51,14 @@ impl ClientService { } /// TODO [arkpar] Please document me - pub fn client(&self) -> Arc> { + pub fn client(&self) -> Arc { self.client.clone() } } /// IO interface for the Client handler struct ClientIoHandler { - client: Arc> + client: Arc } const CLIENT_TICK_TIMER: TimerToken = 0; @@ -59,16 +71,16 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if timer == CLIENT_TICK_TIMER { - self.client.read().unwrap().tick(); + self.client.tick(); } } - fn message(&self, _io: &IoContext, net_message: &NetSyncMessage) { + fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { match net_message { &UserMessage(ref message) => { match message { &SyncMessage::BlockVerified => { - self.client.write().unwrap().import_verified_blocks(); + self.client.import_verified_blocks(&io.channel()); }, _ => {}, // ignore other messages } diff --git a/src/sync/io.rs b/src/sync/io.rs index f49591a9f..754e3add5 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -1,7 +1,7 @@ use client::BlockChainClient; use util::{NetworkContext, PeerId, PacketId,}; use util::error::UtilError; -use sync::SyncMessage; +use service::SyncMessage; /// IO interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -14,7 +14,7 @@ pub trait SyncIo { /// Send a packet to a peer. fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError>; /// Get the blockchain - fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient; + fn chain<'s>(&'s self) -> &'s BlockChainClient; /// Returns peer client identifier string fn peer_info(&self, peer_id: PeerId) -> String { peer_id.to_string() @@ -24,12 +24,12 @@ pub trait SyncIo { /// Wraps `NetworkContext` and the blockchain client pub struct NetSyncIo<'s, 'h> where 'h: 's { network: &'s NetworkContext<'h, SyncMessage>, - chain: &'s mut BlockChainClient + chain: &'s BlockChainClient } impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. 
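Because the client methods the sync code relies on now take `&self`, `chain()` can hand out a shared borrow and `NetSyncIo` no longer needs `&mut` access to the client. A caller generic over `SyncIo` then reduces to something like this (a sketch; `try_import` is illustrative and `Vec<u8>` stands in for `Bytes`):

    fn try_import(io: &SyncIo, block_rlp: Vec<u8>) -> bool {
        // a shared borrow is all that is needed now that `import_block` takes `&self`
        io.chain().import_block(block_rlp).is_ok()
    }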
- pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s, 'h> { + pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, @@ -50,7 +50,7 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { self.network.send(peer_id, packet_id, data) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain<'a>(&'a self) -> &'a BlockChainClient { self.chain } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 9bb18a1c0..c87dee569 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -25,9 +25,10 @@ use std::ops::*; use std::sync::*; use client::Client; -use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; +use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use sync::chain::ChainSync; -use util::{Bytes, TimerToken}; +use util::TimerToken; +use service::SyncMessage; use sync::io::NetSyncIo; mod chain; @@ -39,22 +40,10 @@ mod tests; const SYNC_TIMER: usize = 0; -/// Message type for external events -#[derive(Clone)] -pub enum SyncMessage { - /// New block has been imported into the blockchain - NewChainBlock(Bytes), //TODO: use Cow - /// A block is ready - BlockVerified, -} - -/// TODO [arkpar] Please document me -pub type NetSyncMessage = NetworkIoMessage; - /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. TODO: this should evetually become an IPC endpoint - chain: Arc>, + chain: Arc, /// Sync strategy sync: RwLock } @@ -63,7 +52,7 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc>) { + pub fn register(service: &mut NetworkService, chain: Arc) { let sync = Arc::new(EthSync { chain: chain, sync: RwLock::new(ChainSync::new()), @@ -78,12 +67,12 @@ impl EthSync { /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { - self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref())); } /// Restart sync pub fn restart(&mut self, io: &mut NetworkContext) { - self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref())); } } @@ -93,20 +82,20 @@ impl NetworkProtocolHandler for EthSync { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); + self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.deref()) , *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } fn timeout(&self, io: &NetworkContext, timer: TimerToken) { if timer == SYNC_TIMER { - 
self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } } } diff --git a/util/src/io/service.rs b/util/src/io/service.rs index a229e4022..fab0f113d 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -102,6 +102,11 @@ impl IoContext where Message: Send + Clone + 'static { pub fn message(&self, message: Message) { self.channel.send(message).expect("Error seding message"); } + + /// Get message channel + pub fn channel(&self) -> IoChannel { + self.channel.clone() + } } #[derive(Clone)] diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index ada9c0d2b..9115c4362 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -34,6 +34,16 @@ impl JournalDB { } } + /// Create a new instance given a shared `backing` database. + pub fn new_with_arc(backing: Arc) -> JournalDB { + JournalDB { + forward: OverlayDB::new_with_arc(backing.clone()), + backing: backing, + inserts: vec![], + removes: vec![], + } + } + /// Create a new instance with an anonymous temporary database. pub fn new_temp() -> JournalDB { let mut dir = env::temp_dir(); diff --git a/util/src/network/host.rs b/util/src/network/host.rs index f83cb1908..b9b9496c4 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -675,13 +675,13 @@ impl IoHandler> for Host where Messa if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { match connection.lock().unwrap().deref() { &ConnectionEntry::Handshake(ref h) => h.register_socket(reg, event_loop).expect("Error registering socket"), - _ => warn!("Unexpected session stream registration") + &ConnectionEntry::Session(_) => warn!("Unexpected session stream registration") } - } else { warn!("Unexpected stream registration")} + } else {} // expired } NODETABLE_RECEIVE => event_loop.register(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), TCP_ACCEPT => event_loop.register(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), - _ => warn!("Unexpected stream regitration") + _ => warn!("Unexpected stream registration") } } @@ -693,7 +693,7 @@ impl IoHandler> for Host where Messa &ConnectionEntry::Handshake(ref h) => h.update_socket(reg, event_loop).expect("Error updating socket"), &ConnectionEntry::Session(ref s) => s.update_socket(reg, event_loop).expect("Error updating socket"), } - } else { warn!("Unexpected stream update")} + } else {} // expired } NODETABLE_RECEIVE => event_loop.reregister(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), TCP_ACCEPT => event_loop.reregister(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), From 67286901091f75dcfaf5b5de0030d215b05ccec6 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 23:34:06 +0100 Subject: [PATCH 025/138] DB commit queue --- src/db_queue.rs | 111 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 src/db_queue.rs diff --git a/src/db_queue.rs b/src/db_queue.rs new file mode 100644 index 000000000..242fd9cc4 --- /dev/null +++ b/src/db_queue.rs @@ -0,0 +1,111 @@ +//! A queue of state changes that are written to database in background. 
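The intended usage, pieced together from `Client::new` above and the fields defined just below, is: create the queue next to the client, call `start` once with `Arc::downgrade(&client)` so the writer thread cannot keep the client alive, then push `StateDBCommit` jobs as blocks are enacted. A sketch (the helper function and its arguments are illustrative):

    fn queue_state_commit(db_queue: &DbQueue, state_db: &JournalDB, header: &Header) {
        db_queue.queue(StateDBCommit {
            db: state_db.clone(),   // JournalDB overlay to flush in the background
            now: header.number(),
            hash: header.hash(),
            end: None,              // or Some((ancient_number, ancient_hash)) once history is deep enough
        });
    }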
+use std::thread::{JoinHandle, self}; +use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; +use util::*; +use engine::Engine; +use client::Client; + +/// State DB commit params +pub struct StateDBCommit { + /// Database to commit + pub db: JournalDB, + /// Starting block number + pub now: u64, + /// Block ahash + pub hash: H256, + /// End block number + hash + pub end: Option<(u64, H256)>, +} + +/// A queue of state changes that are written to database in background. +pub struct DbQueue { + more_to_write: Arc, + queue: Arc>>, + writer: Mutex>>, + deleting: Arc, +} + +impl DbQueue { + /// Creates a new queue instance. + pub fn new() -> DbQueue { + let queue = Arc::new(Mutex::new(VecDeque::new())); + let more_to_write = Arc::new(Condvar::new()); + let deleting = Arc::new(AtomicBool::new(false)); + + DbQueue { + more_to_write: more_to_write.clone(), + queue: queue.clone(), + writer: Mutex::new(None), + deleting: deleting.clone(), + } + } + + /// Start processing the queue + pub fn start(&self, client: Weak) { + let writer = { + let queue = self.queue.clone(); + let client = client.clone(); + let more_to_write = self.more_to_write.clone(); + let deleting = self.deleting.clone(); + thread::Builder::new().name("DB Writer".to_string()).spawn(move || DbQueue::writer_loop(client, queue, more_to_write, deleting)).expect("Error creating db writer thread") + }; + mem::replace(self.writer.lock().unwrap().deref_mut(), Some(writer)); + } + + fn writer_loop(client: Weak, queue: Arc>>, wait: Arc, deleting: Arc) { + while !deleting.load(AtomicOrdering::Relaxed) { + let mut batch = { + let mut locked = queue.lock().unwrap(); + while locked.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { + locked = wait.wait(locked).unwrap(); + } + + if deleting.load(AtomicOrdering::Relaxed) { + return; + } + mem::replace(locked.deref_mut(), VecDeque::new()) + }; + + for mut state in batch.drain(..) 
{ //TODO: make this a single write transaction + match state.db.commit(state.now, &state.hash, state.end.clone()) { + Ok(_) => (), + Err(e) => { + warn!(target: "client", "State DB commit failed: {:?}", e); + } + } + client.upgrade().unwrap().clear_state(&state.hash); + } + + } + } + + /// Add a state to the queue + pub fn queue(&self, state: StateDBCommit) { + let mut queue = self.queue.lock().unwrap(); + queue.push_back(state); + self.more_to_write.notify_all(); + } +} + +impl Drop for DbQueue { + fn drop(&mut self) { + self.deleting.store(true, AtomicOrdering::Relaxed); + self.more_to_write.notify_all(); + mem::replace(self.writer.lock().unwrap().deref_mut(), None).unwrap().join().unwrap(); + } +} + +#[cfg(test)] +mod tests { + use util::*; + use spec::*; + use queue::*; + + #[test] + fn test_block_queue() { + // TODO better test + let spec = Spec::new_test(); + let engine = spec.to_engine().unwrap(); + let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected()); + } +} From ccf1cc4d54b3df779f28c9c1d94d7f99813dafab Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 00:11:19 +0100 Subject: [PATCH 026/138] Removed obsolete code and added documentation --- Cargo.toml | 1 + src/bin/client/main.rs | 19 +++++++++++-------- util/src/io/service.rs | 10 +++++++--- util/src/network/error.rs | 6 ++++++ util/src/network/host.rs | 31 +++++++------------------------ util/src/network/service.rs | 13 +------------ 6 files changed, 33 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 04c4bf956..ee04e6fdf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" +ctrlc = "1.0" [features] jit = ["evmjit"] diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 3ebf4e080..7d673f8d3 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -3,11 +3,12 @@ extern crate ethcore; extern crate rustc_serialize; extern crate log; extern crate env_logger; +extern crate ctrlc; -use std::io::stdin; use std::env; use log::{LogLevelFilter}; use env_logger::LogBuilder; +use ctrlc::CtrlC; use util::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -31,13 +32,15 @@ fn main() { let mut service = ClientService::start(spec).unwrap(); let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); - loop { - let mut cmd = String::new(); - stdin().read_line(&mut cmd).unwrap(); - if cmd == "quit\n" || cmd == "exit\n" || cmd == "q\n" { - break; - } - } + + let exit = Arc::new(Condvar::new()); + let e = exit.clone(); + CtrlC::set_handler(move || { + e.notify_all(); + }); + + let mutex = Mutex::new(()); + let _ = exit.wait(mutex.lock().unwrap()).unwrap(); } struct Informant { diff --git a/util/src/io/service.rs b/util/src/io/service.rs index fab0f113d..7df064794 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -10,11 +10,14 @@ use arrayvec::*; use crossbeam::sync::chase_lev; use io::worker::{Worker, Work, WorkType}; +/// Timer ID pub type TimerToken = usize; +/// Timer ID pub type StreamToken = usize; +/// IO Hadndler ID pub type HandlerId = usize; -// Tokens +/// Maximum number of tokens a handler can use pub const TOKENS_PER_HANDLER: usize = 16384; /// Messages used to communicate with the event loop from other threads. 
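The `IoContext::channel()` accessor added earlier in this series pairs with these messages: a handler can clone the channel, move it into a worker thread, and post a message back into the event loop when the work completes. A sketch, with the handler, message type and thread body all illustrative, assuming the cloned channel may be moved to another thread:

    #[derive(Clone)]
    struct Done {
        data: u32,
    }

    struct Background;

    impl IoHandler<Done> for Background {
        fn timeout(&self, io: &IoContext<Done>, _timer: TimerToken) {
            use std::thread;
            let channel = io.channel();   // cheap clone that can leave the IO thread
            thread::spawn(move || {
                // ... long-running work off the event loop ...
                channel.send(Done { data: 42 }).expect("IO service stopped");
            });
        }
    }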
@@ -49,8 +52,8 @@ pub enum IoMessage where Message: Send + Clone + Sized { /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. pub struct IoContext where Message: Send + Clone + 'static { - pub channel: IoChannel, - pub handler: HandlerId, + channel: IoChannel, + handler: HandlerId, } impl IoContext where Message: Send + Clone + 'static { @@ -249,6 +252,7 @@ impl IoChannel where Message: Send + Clone { Ok(()) } + /// Send low level io message pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { if let Some(ref channel) = self.channel { try!(channel.send(message)) diff --git a/util/src/network/error.rs b/util/src/network/error.rs index d255cb043..b9dfdc892 100644 --- a/util/src/network/error.rs +++ b/util/src/network/error.rs @@ -19,11 +19,17 @@ pub enum DisconnectReason } #[derive(Debug)] +/// Network error. pub enum NetworkError { + /// Authentication error. Auth, + /// Unrecognised protocol. BadProtocol, + /// Peer not found. PeerNotFound, + /// Peer is diconnected. Disconnect(DisconnectReason), + /// Socket IO error. Io(IoError), } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index b9b9496c4..55ade8090 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -69,22 +69,22 @@ pub type ProtocolId = &'static str; pub enum NetworkIoMessage where Message: Send + Sync + Clone { /// Register a new protocol handler. AddHandler { + /// Handler shared instance. handler: Arc + Sync>, + /// Protocol Id. protocol: ProtocolId, + /// Supported protocol versions. versions: Vec, }, + /// Register a new protocol timer AddTimer { + /// Protocol Id. protocol: ProtocolId, + /// Timer token. token: TimerToken, + /// Timer delay in milliseconds. delay: u64, }, - /// Send data over the network. // TODO: remove this - Send { - peer: PeerId, - packet_id: PacketId, - protocol: ProtocolId, - data: Vec, - }, /// User message User(Message), } @@ -644,23 +644,6 @@ impl IoHandler> for Host where Messa self.timers.write().unwrap().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token }); io.register_timer(handler_token, *delay).expect("Error registering timer"); }, - &NetworkIoMessage::Send { - ref peer, - ref packet_id, - ref protocol, - ref data, - } => { - if let Some(connection) = self.connections.read().unwrap().get(*peer).map(|c| c.clone()) { - match connection.lock().unwrap().deref_mut() { - &mut ConnectionEntry::Session(ref mut s) => { - s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { warn!(target: "net", "Send: Peer session not exist"); } - } - } else { warn!(target: "net", "Send: Peer does not exist"); } - }, &NetworkIoMessage::User(ref message) => { for (p, h) in self.handlers.read().unwrap().iter() { h.message(&mut NetworkContext::new(io, p, None, self.connections.clone()), &message); diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 1318737a3..67d2b55e2 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -2,7 +2,7 @@ use std::sync::*; use error::*; use network::{NetworkProtocolHandler}; use network::error::{NetworkError}; -use network::host::{Host, NetworkIoMessage, PeerId, PacketId, ProtocolId}; +use network::host::{Host, NetworkIoMessage, ProtocolId}; use io::*; /// IO Service with networking @@ -26,17 +26,6 @@ impl NetworkService where Message: Send + Sync + Clone + 'stat }) } - /// Send a message over the network. 
Normaly `HostIo::send` should be used. This can be used from non-io threads. - pub fn send(&mut self, peer: &PeerId, packet_id: PacketId, protocol: ProtocolId, data: &[u8]) -> Result<(), NetworkError> { - try!(self.io_service.send_message(NetworkIoMessage::Send { - peer: *peer, - packet_id: packet_id, - protocol: protocol, - data: data.to_vec() - })); - Ok(()) - } - /// Regiter a new protocol handler with the event loop. pub fn register_protocol(&mut self, handler: Arc+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { try!(self.io_service.send_message(NetworkIoMessage::AddHandler { From d431854421d024da69c22ac3bc1ed0725f79aaf2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 00:47:45 +0100 Subject: [PATCH 027/138] Fixed tests --- util/src/io/mod.rs | 23 +++++++++------- util/src/network/mod.rs | 58 +++++++++++++++++++++++++++++++++++------ 2 files changed, 63 insertions(+), 18 deletions(-) diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 4406c751f..48c02f6ee 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -8,27 +8,28 @@ /// /// struct MyHandler; /// +/// #[derive(Clone)] /// struct MyMessage { /// data: u32 /// } /// /// impl IoHandler for MyHandler { -/// fn initialize(&mut self, io: &mut IoContext) { -/// io.register_timer(1000).unwrap(); +/// fn initialize(&self, io: &IoContext) { +/// io.register_timer(0, 1000).unwrap(); /// } /// -/// fn timeout(&mut self, _io: &mut IoContext, timer: TimerToken) { +/// fn timeout(&self, _io: &IoContext, timer: TimerToken) { /// println!("Timeout {}", timer); /// } /// -/// fn message(&mut self, _io: &mut IoContext, message: &mut MyMessage) { +/// fn message(&self, _io: &IoContext, message: &MyMessage) { /// println!("Message {}", message.data); /// } /// } /// /// fn main () { /// let mut service = IoService::::start().expect("Error creating network service"); -/// service.register_handler(Box::new(MyHandler)).unwrap(); +/// service.register_handler(Arc::new(MyHandler)).unwrap(); /// /// // Wait for quit condition /// // ... 
@@ -93,24 +94,26 @@ pub use io::service::TOKENS_PER_HANDLER; #[cfg(test)] mod tests { + use std::sync::Arc; use io::*; struct MyHandler; + #[derive(Clone)] struct MyMessage { data: u32 } impl IoHandler for MyHandler { - fn initialize(&mut self, io: &mut IoContext) { - io.register_timer(1000).unwrap(); + fn initialize(&self, io: &IoContext) { + io.register_timer(0, 1000).unwrap(); } - fn timeout(&mut self, _io: &mut IoContext, timer: TimerToken) { + fn timeout(&self, _io: &IoContext, timer: TimerToken) { println!("Timeout {}", timer); } - fn message(&mut self, _io: &mut IoContext, message: &mut MyMessage) { + fn message(&self, _io: &IoContext, message: &MyMessage) { println!("Message {}", message.data); } } @@ -118,7 +121,7 @@ mod tests { #[test] fn test_service_register_handler () { let mut service = IoService::::start().expect("Error creating network service"); - service.register_handler(Box::new(MyHandler)).unwrap(); + service.register_handler(Arc::new(MyHandler)).unwrap(); } } diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index 32296d476..0c734442d 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -8,39 +8,40 @@ /// /// struct MyHandler; /// +/// #[derive(Clone)] /// struct MyMessage { /// data: u32 /// } /// /// impl NetworkProtocolHandler for MyHandler { -/// fn initialize(&mut self, io: &mut NetworkContext) { -/// io.register_timer(1000); +/// fn initialize(&self, io: &NetworkContext) { +/// io.register_timer(0, 1000); /// } /// -/// fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { +/// fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { /// println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); /// } /// -/// fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { +/// fn connected(&self, io: &NetworkContext, peer: &PeerId) { /// println!("Connected {}", peer); /// } /// -/// fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { +/// fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { /// println!("Disconnected {}", peer); /// } /// -/// fn timeout(&mut self, io: &mut NetworkContext, timer: TimerToken) { +/// fn timeout(&self, io: &NetworkContext, timer: TimerToken) { /// println!("Timeout {}", timer); /// } /// -/// fn message(&mut self, io: &mut NetworkContext, message: &MyMessage) { +/// fn message(&self, io: &NetworkContext, message: &MyMessage) { /// println!("Message {}", message.data); /// } /// } /// /// fn main () { /// let mut service = NetworkService::::start().expect("Error creating network service"); -/// service.register_protocol(Box::new(MyHandler), "myproto", &[1u8]); +/// service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]); /// /// // Wait for quit condition /// // ... 
@@ -91,3 +92,44 @@ pub trait NetworkProtocolHandler: Sync + Send where Message: Send + Syn fn message(&self, _io: &NetworkContext, _message: &Message) {} } + +#[test] +fn test_net_service() { + + use std::sync::Arc; + struct MyHandler; + + #[derive(Clone)] + struct MyMessage { + data: u32 + } + + impl NetworkProtocolHandler for MyHandler { + fn initialize(&self, io: &NetworkContext) { + io.register_timer(0, 1000).unwrap(); + } + + fn read(&self, _io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); + } + + fn connected(&self, _io: &NetworkContext, peer: &PeerId) { + println!("Connected {}", peer); + } + + fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { + println!("Disconnected {}", peer); + } + + fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { + println!("Timeout {}", timer); + } + + fn message(&self, _io: &NetworkContext, message: &MyMessage) { + println!("Message {}", message.data); + } + } + + let mut service = NetworkService::::start().expect("Error creating network service"); + service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]).unwrap(); +} From 81bb86d0ed1d379d9879138139339ca362cbf20d Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 01:27:51 +0100 Subject: [PATCH 028/138] Removed obsolete warnings --- util/src/io/service.rs | 2 +- util/src/io/worker.rs | 2 +- util/src/network/host.rs | 6 ++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 7df064794..6daaf8cc6 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -313,7 +313,7 @@ impl IoService where Message: Send + Sync + Clone + 'static { impl Drop for IoService where Message: Send + Sync + Clone { fn drop(&mut self) { self.host_channel.send(IoMessage::Shutdown).unwrap(); - self.thread.take().unwrap().join().unwrap(); + self.thread.take().unwrap().join().ok(); } } diff --git a/util/src/io/worker.rs b/util/src/io/worker.rs index 8527b245a..d4418afe0 100644 --- a/util/src/io/worker.rs +++ b/util/src/io/worker.rs @@ -99,6 +99,6 @@ impl Drop for Worker { self.deleting.store(true, AtomicOrdering::Relaxed); self.wait.notify_all(); let thread = mem::replace(&mut self.thread, None).unwrap(); - thread.join().unwrap(); + thread.join().ok(); } } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 55ade8090..9203e73b6 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -438,7 +438,7 @@ impl Host where Message: Send + Sync + Clone { io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); } } - } else { warn!(target: "net", "Received event for unknown connection") } + } if kill { self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; @@ -498,9 +498,7 @@ impl Host where Message: Send + Sync + Clone { } } } - } else { - warn!(target: "net", "Received event for unknown connection"); - } + } if kill { self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; From 9bcb720f1f6ef9f33000345a17d335b031e8ccb7 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 04:54:38 +0100 Subject: [PATCH 029/138] Minor optimizations --- src/bin/client/main.rs | 19 ++++++++++++++----- src/block_queue.rs | 29 +++++++++++++++++++++++++---- src/blockchain.rs | 24 +++++++++++++----------- src/client.rs | 37 ++++++------------------------------- src/service.rs | 
10 +++++++++- src/sync/chain.rs | 14 ++++++++++---- src/sync/mod.rs | 15 +++------------ 7 files changed, 80 insertions(+), 68 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 7d673f8d3..147ea2be2 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -14,6 +14,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethcore::blockchain::CacheSize; +use ethcore::sync::EthSync; fn setup_log() { let mut builder = LogBuilder::new(); @@ -30,7 +31,7 @@ fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); - let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default() }); + let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: service.sync() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); let exit = Arc::new(Condvar::new()); @@ -60,22 +61,29 @@ impl Default for Informant { } impl Informant { - pub fn tick(&self, client: &Client) { + pub fn tick(&self, client: &Client, sync: &EthSync) { // 5 seconds betwen calls. TODO: calculate this properly. let dur = 5usize; let chain_info = client.chain_info(); + let queue_info = client.queue_info(); let cache_info = client.cache_info(); let report = client.report(); + let sync_info = sync.status(); if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //···{}···// {} ({}) bl {} ({}) ex ]", + println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {} queued ···// {} ({}) bl {} ({}) ex ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, (report.transactions_applied - last_report.transactions_applied) / dur, (report.gas_processed - last_report.gas_processed) / From::from(dur), - 0, // TODO: peers + + sync_info.num_active_peers, + sync_info.num_peers, + sync_info.blocks_received, + queue_info.queue_size, + cache_info.blocks, cache_info.blocks as isize - last_cache_info.blocks as isize, cache_info.block_details, @@ -93,6 +101,7 @@ const INFO_TIMER: TimerToken = 0; struct ClientIoHandler { client: Arc, + sync: Arc, info: Informant, } @@ -103,7 +112,7 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if INFO_TIMER == timer { - self.info.tick(&self.client); + self.info.tick(&self.client, &self.sync); } } } diff --git a/src/block_queue.rs b/src/block_queue.rs index 0bb184a1b..1ffd0f7ec 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -10,6 +10,15 @@ use views::*; use header::*; use service::*; +/// Block queue status +#[derive(Debug)] +pub struct BlockQueueInfo { + /// Indicates that queue is full + pub full: bool, + /// Number of queued blocks + pub queue_size: usize, +} + /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. 
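`BlockQueueInfo` replaces the old `BlockQueueStatus`: the informant prints `queue_size` and the sync code checks `full` before requesting more blocks. A caller-side sketch (the helper function and the numeric cap are illustrative):

    fn can_import_more(chain: &BlockChainClient) -> bool {
        let info = chain.queue_info();
        // `full` is what sync backs off on; the extra cap is only for illustration
        !info.full && info.queue_size < 1000
    }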
pub struct BlockQueue { @@ -65,14 +74,15 @@ impl BlockQueue { let deleting = Arc::new(AtomicBool::new(false)); let mut verifiers: Vec> = Vec::new(); - let thread_count = max(::num_cpus::get(), 2) - 1; - for _ in 0..thread_count { + let thread_count = max(::num_cpus::get(), 3) - 2; + for i in 0..thread_count { let verification = verification.clone(); let engine = engine.clone(); let more_to_verify = more_to_verify.clone(); let ready_signal = ready_signal.clone(); let deleting = deleting.clone(); - verifiers.push(thread::spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting))); + verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting)) + .expect("Error starting block verification thread")); } BlockQueue { engine: engine, @@ -206,7 +216,7 @@ impl BlockQueue { verification.verified = new_verified; } - /// TODO [arkpar] Please document me + /// Removes up to `max` verified blocks from the queue pub fn drain(&mut self, max: usize) -> Vec { let mut verification = self.verification.lock().unwrap(); let count = min(max, verification.verified.len()); @@ -217,8 +227,19 @@ impl BlockQueue { result.push(block); } self.ready_signal.reset(); + if !verification.verified.is_empty() { + self.ready_signal.set(); + } result } + + /// Get queue status. + pub fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { + full: false, + queue_size: self.verification.lock().unwrap().unverified.len(), + } + } } impl Drop for BlockQueue { diff --git a/src/blockchain.rs b/src/blockchain.rs index 27abe9ee3..0720d7229 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -342,19 +342,19 @@ impl BlockChain { Some(h) => h, None => return None, }; - Some(self._tree_route((from_details, from), (to_details, to))) + Some(self._tree_route((&from_details, &from), (&to_details, &to))) } /// Similar to `tree_route` function, but can be used to return a route /// between blocks which may not be in database yet. - fn _tree_route(&self, from: (BlockDetails, H256), to: (BlockDetails, H256)) -> TreeRoute { + fn _tree_route(&self, from: (&BlockDetails, &H256), to: (&BlockDetails, &H256)) -> TreeRoute { let mut from_branch = vec![]; let mut to_branch = vec![]; - let mut from_details = from.0; - let mut to_details = to.0; - let mut current_from = from.1; - let mut current_to = to.1; + let mut from_details = from.0.clone(); + let mut to_details = to.0.clone(); + let mut current_from = from.1.clone(); + let mut current_to = to.1.clone(); // reset from && to to the same level while from_details.number > to_details.number { @@ -409,7 +409,7 @@ impl BlockChain { // store block in db self.blocks_db.put(&hash, &bytes).unwrap(); - let (batch, new_best) = self.block_to_extras_insert_batch(bytes); + let (batch, new_best, details) = self.block_to_extras_insert_batch(bytes); // update best block let mut best_block = self.best_block.write().unwrap(); @@ -420,6 +420,8 @@ impl BlockChain { // update caches let mut write = self.block_details.write().unwrap(); write.remove(&header.parent_hash()); + write.insert(hash.clone(), details); + self.note_used(CacheID::Block(hash)); // update extras database self.extras_db.write(batch).unwrap(); @@ -427,7 +429,7 @@ impl BlockChain { /// Transforms block into WriteBatch that may be written into database /// Additionally, if it's new best block it returns new best block object. 
- fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option) { + fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option, BlockDetails) { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); @@ -459,7 +461,7 @@ impl BlockChain { // if it's not new best block, just return if !is_new_best { - return (batch, None); + return (batch, None, details); } // if its new best block we need to make sure that all ancestors @@ -467,7 +469,7 @@ impl BlockChain { // find the route between old best block and the new one let best_hash = self.best_block_hash(); let best_details = self.block_details(&best_hash).expect("best block hash is invalid!"); - let route = self._tree_route((best_details, best_hash), (details, hash.clone())); + let route = self._tree_route((&best_details, &best_hash), (&details, &hash)); match route.blocks.len() { // its our parent @@ -494,7 +496,7 @@ impl BlockChain { total_difficulty: total_difficulty }; - (batch, Some(best_block)) + (batch, Some(best_block), details) } /// Returns true if transaction is known. diff --git a/src/client.rs b/src/client.rs index cf8b0fd7c..04d372786 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,8 +6,8 @@ use error::*; use header::BlockNumber; use spec::Spec; use engine::Engine; -use block_queue::BlockQueue; -use db_queue::{DbQueue, StateDBCommit}; +use block_queue::{BlockQueue, BlockQueueInfo}; +use db_queue::{DbQueue}; use service::NetSyncMessage; use env_info::LastHashes; use verification::*; @@ -47,13 +47,6 @@ impl fmt::Display for BlockChainInfo { } } -/// Block queue status -#[derive(Debug)] -pub struct BlockQueueStatus { - /// TODO [arkpar] Please document me - pub full: bool, -} - /// TODO [arkpar] Please document me pub type TreeRoute = ::blockchain::TreeRoute; @@ -99,7 +92,7 @@ pub trait BlockChainClient : Sync + Send { fn import_block(&self, bytes: Bytes) -> ImportResult; /// Get block queue information. - fn queue_status(&self) -> BlockQueueStatus; + fn queue_info(&self) -> BlockQueueInfo; /// Clear block queue and abort all import activity. 
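Because `import_block` and `clear_queue` now take `&self`, a bare `Arc<Client>` can be shared with the sync handler and any worker thread without the former `RwLock<Client>` wrapper. A sketch (the helper and the ignored error handling are illustrative; `Vec<u8>` stands in for `Bytes`):

    use std::sync::Arc;
    use std::thread;

    fn spawn_importer(client: Arc<Client>, block: Vec<u8>) {
        thread::spawn(move || {
            // no RwLock<Client> wrapper is needed any more: `import_block` takes `&self`
            let _ = client.import_block(block);
        });
    }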
fn clear_queue(&self); @@ -149,8 +142,6 @@ impl Client { let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); - opts.set_disable_data_sync(true); - opts.set_disable_auto_compactions(true); /*opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); @@ -199,7 +190,6 @@ impl Client { /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, _io: &IoChannel) { - let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); @@ -243,11 +233,7 @@ impl Client { } } - let db = match self.uncommited_states.read().unwrap().get(&header.parent_hash) { - Some(db) => db.clone(), - None => self.state_db.clone(), - }; - + let db = self.state_db.clone(); let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { @@ -272,15 +258,6 @@ impl Client { return; } } - /* - let db = result.drain(); - self.uncommited_states.write().unwrap().insert(header.hash(), db.clone()); - self.db_queue.write().unwrap().queue(StateDBCommit { - now: header.number(), - hash: header.hash().clone(), - end: ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap())), - db: db, - });*/ self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } @@ -369,10 +346,8 @@ impl BlockChainClient for Client { self.block_queue.write().unwrap().import_block(bytes) } - fn queue_status(&self) -> BlockQueueStatus { - BlockQueueStatus { - full: false - } + fn queue_info(&self) -> BlockQueueInfo { + self.block_queue.read().unwrap().queue_info() } fn clear_queue(&self) { diff --git a/src/service.rs b/src/service.rs index 4034ce841..b9b510d5e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -21,6 +21,7 @@ pub type NetSyncMessage = NetworkIoMessage; pub struct ClientService { net_service: NetworkService, client: Arc, + sync: Arc, } impl ClientService { @@ -33,7 +34,7 @@ impl ClientService { dir.push(".parity"); dir.push(H64::from(spec.genesis_header().hash()).hex()); let client = try!(Client::new(spec, &dir, net_service.io().channel())); - EthSync::register(&mut net_service, client.clone()); + let sync = EthSync::register(&mut net_service, client.clone()); let client_io = Arc::new(ClientIoHandler { client: client.clone() }); @@ -42,6 +43,7 @@ impl ClientService { Ok(ClientService { net_service: net_service, client: client, + sync: sync, }) } @@ -53,6 +55,12 @@ impl ClientService { /// TODO [arkpar] Please document me pub fn client(&self) -> Arc { self.client.clone() + + } + + /// Get shared sync handler + pub fn sync(&self) -> Arc { + self.sync.clone() } } diff --git a/src/sync/chain.rs b/src/sync/chain.rs index ce748da08..f44f058c8 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -107,6 +107,10 @@ pub struct SyncStatus { pub blocks_total: usize, /// Number of blocks downloaded so far. 
pub blocks_received: usize, + /// Total number of connected peers + pub num_peers: usize, + /// Total number of active peers + pub num_active_peers: usize, } #[derive(PartialEq, Eq, Debug)] @@ -195,8 +199,10 @@ impl ChainSync { start_block_number: self.starting_block, last_imported_block_number: self.last_imported_block, highest_block_number: self.highest_block, - blocks_total: (self.last_imported_block - self.starting_block) as usize, - blocks_received: (self.highest_block - self.starting_block) as usize, + blocks_received: (self.last_imported_block - self.starting_block) as usize, + blocks_total: (self.highest_block - self.starting_block) as usize, + num_peers: self.peers.len(), + num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), } } @@ -544,7 +550,7 @@ impl ChainSync { fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) { self.clear_peer_download(peer_id); - if io.chain().queue_status().full { + if io.chain().queue_info().full { self.pause_sync(); return; } @@ -971,7 +977,7 @@ impl ChainSync { } /// Maintain other peers. Send out any new blocks and transactions - pub fn maintain_sync(&mut self, _io: &mut SyncIo) { + pub fn _maintain_sync(&mut self, _io: &mut SyncIo) { } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index c87dee569..078100084 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -27,7 +27,6 @@ use std::sync::*; use client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use sync::chain::ChainSync; -use util::TimerToken; use service::SyncMessage; use sync::io::NetSyncIo; @@ -38,8 +37,6 @@ mod range_collection; #[cfg(test)] mod tests; -const SYNC_TIMER: usize = 0; - /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
TODO: this should evetually become an IPC endpoint @@ -52,12 +49,13 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc) { + pub fn register(service: &mut NetworkService, chain: Arc) -> Arc { let sync = Arc::new(EthSync { chain: chain, sync: RwLock::new(ChainSync::new()), }); service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); + sync } /// Get sync status @@ -77,8 +75,7 @@ impl EthSync { } impl NetworkProtocolHandler for EthSync { - fn initialize(&self, io: &NetworkContext) { - io.register_timer(SYNC_TIMER, 1000).unwrap(); + fn initialize(&self, _io: &NetworkContext) { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -92,12 +89,6 @@ impl NetworkProtocolHandler for EthSync { fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } - - fn timeout(&self, io: &NetworkContext, timer: TimerToken) { - if timer == SYNC_TIMER { - self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); - } - } } From 0ce15af91e3c38ba6f86ee9689b2184ed85fa3d0 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 04:57:02 +0100 Subject: [PATCH 030/138] Removed db_queue --- src/client.rs | 11 +---- src/db_queue.rs | 111 ------------------------------------------------ src/lib.rs | 2 - 3 files changed, 2 insertions(+), 122 deletions(-) delete mode 100644 src/db_queue.rs diff --git a/src/client.rs b/src/client.rs index 04d372786..6f47d0601 100644 --- a/src/client.rs +++ b/src/client.rs @@ -7,7 +7,6 @@ use header::BlockNumber; use spec::Spec; use engine::Engine; use block_queue::{BlockQueue, BlockQueueInfo}; -use db_queue::{DbQueue}; use service::NetSyncMessage; use env_info::LastHashes; use verification::*; @@ -127,7 +126,6 @@ pub struct Client { engine: Arc>, state_db: JournalDB, block_queue: RwLock, - db_queue: RwLock, report: RwLock, uncommited_states: RwLock>, import_lock: Mutex<()> @@ -172,20 +170,15 @@ impl Client { } let state_db = JournalDB::new_with_arc(db); - let client = Arc::new(Client { + Ok(Arc::new(Client { chain: chain, engine: engine.clone(), state_db: state_db, block_queue: RwLock::new(BlockQueue::new(engine, message_channel)), - db_queue: RwLock::new(DbQueue::new()), report: RwLock::new(Default::default()), uncommited_states: RwLock::new(HashMap::new()), import_lock: Mutex::new(()), - }); - - let weak = Arc::downgrade(&client); - client.db_queue.read().unwrap().start(weak); - Ok(client) + })) } /// This is triggered by a message coming from a block queue when the block is ready for insertion diff --git a/src/db_queue.rs b/src/db_queue.rs deleted file mode 100644 index 242fd9cc4..000000000 --- a/src/db_queue.rs +++ /dev/null @@ -1,111 +0,0 @@ -//! A queue of state changes that are written to database in background. -use std::thread::{JoinHandle, self}; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use util::*; -use engine::Engine; -use client::Client; - -/// State DB commit params -pub struct StateDBCommit { - /// Database to commit - pub db: JournalDB, - /// Starting block number - pub now: u64, - /// Block ahash - pub hash: H256, - /// End block number + hash - pub end: Option<(u64, H256)>, -} - -/// A queue of state changes that are written to database in background. 
-pub struct DbQueue { - more_to_write: Arc, - queue: Arc>>, - writer: Mutex>>, - deleting: Arc, -} - -impl DbQueue { - /// Creates a new queue instance. - pub fn new() -> DbQueue { - let queue = Arc::new(Mutex::new(VecDeque::new())); - let more_to_write = Arc::new(Condvar::new()); - let deleting = Arc::new(AtomicBool::new(false)); - - DbQueue { - more_to_write: more_to_write.clone(), - queue: queue.clone(), - writer: Mutex::new(None), - deleting: deleting.clone(), - } - } - - /// Start processing the queue - pub fn start(&self, client: Weak) { - let writer = { - let queue = self.queue.clone(); - let client = client.clone(); - let more_to_write = self.more_to_write.clone(); - let deleting = self.deleting.clone(); - thread::Builder::new().name("DB Writer".to_string()).spawn(move || DbQueue::writer_loop(client, queue, more_to_write, deleting)).expect("Error creating db writer thread") - }; - mem::replace(self.writer.lock().unwrap().deref_mut(), Some(writer)); - } - - fn writer_loop(client: Weak, queue: Arc>>, wait: Arc, deleting: Arc) { - while !deleting.load(AtomicOrdering::Relaxed) { - let mut batch = { - let mut locked = queue.lock().unwrap(); - while locked.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { - locked = wait.wait(locked).unwrap(); - } - - if deleting.load(AtomicOrdering::Relaxed) { - return; - } - mem::replace(locked.deref_mut(), VecDeque::new()) - }; - - for mut state in batch.drain(..) { //TODO: make this a single write transaction - match state.db.commit(state.now, &state.hash, state.end.clone()) { - Ok(_) => (), - Err(e) => { - warn!(target: "client", "State DB commit failed: {:?}", e); - } - } - client.upgrade().unwrap().clear_state(&state.hash); - } - - } - } - - /// Add a state to the queue - pub fn queue(&self, state: StateDBCommit) { - let mut queue = self.queue.lock().unwrap(); - queue.push_back(state); - self.more_to_write.notify_all(); - } -} - -impl Drop for DbQueue { - fn drop(&mut self) { - self.deleting.store(true, AtomicOrdering::Relaxed); - self.more_to_write.notify_all(); - mem::replace(self.writer.lock().unwrap().deref_mut(), None).unwrap().join().unwrap(); - } -} - -#[cfg(test)] -mod tests { - use util::*; - use spec::*; - use queue::*; - - #[test] - fn test_block_queue() { - // TODO better test - let spec = Spec::new_test(); - let engine = spec.to_engine().unwrap(); - let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected()); - } -} diff --git a/src/lib.rs b/src/lib.rs index 58d84764a..68c658267 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -149,7 +149,5 @@ pub mod sync; pub mod block; /// TODO [arkpar] Please document me pub mod verification; -/// TODO [debris] Please document me -pub mod db_queue; pub mod block_queue; pub mod ethereum; From 74d34614cfb55226ef082661ee7f1537e6cb9ab2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 05:20:47 +0100 Subject: [PATCH 031/138] Tests compiling again --- src/block_queue.rs | 2 +- src/sync/tests.rs | 99 ++++++++++++++++++++++++---------------------- 2 files changed, 52 insertions(+), 49 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index 1ffd0f7ec..c2cbb35b6 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -257,7 +257,7 @@ impl Drop for BlockQueue { mod tests { use util::*; use spec::*; - use queue::*; + use block_queue::*; #[test] fn test_block_queue() { diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 05d7ac317..b0c93a790 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -1,38 +1,40 @@ use util::*; -use client::{BlockChainClient, 
BlockStatus, TreeRoute, BlockQueueStatus, BlockChainInfo}; +use client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo}; +use block_queue::BlockQueueInfo; use header::{Header as BlockHeader, BlockNumber}; use error::*; use sync::io::SyncIo; use sync::chain::ChainSync; struct TestBlockChainClient { - blocks: HashMap, - numbers: HashMap, + blocks: RwLock>, + numbers: RwLock>, genesis_hash: H256, - last_hash: H256, - difficulty: U256 + last_hash: RwLock, + difficulty: RwLock, } impl TestBlockChainClient { fn new() -> TestBlockChainClient { let mut client = TestBlockChainClient { - blocks: HashMap::new(), - numbers: HashMap::new(), + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), genesis_hash: H256::new(), - last_hash: H256::new(), - difficulty: From::from(0), + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(From::from(0)), }; client.add_blocks(1, true); // add genesis block - client.genesis_hash = client.last_hash.clone(); + client.genesis_hash = client.last_hash.read().unwrap().clone(); client } pub fn add_blocks(&mut self, count: usize, empty: bool) { - for n in self.numbers.len()..(self.numbers.len() + count) { + let len = self.numbers.read().unwrap().len(); + for n in len..(len + count) { let mut header = BlockHeader::new(); header.difficulty = From::from(n); - header.parent_hash = self.last_hash.clone(); + header.parent_hash = self.last_hash.read().unwrap().clone(); header.number = n as BlockNumber; let mut uncles = RlpStream::new_list(if empty {0} else {1}); if !empty { @@ -50,12 +52,12 @@ impl TestBlockChainClient { impl BlockChainClient for TestBlockChainClient { fn block_header(&self, h: &H256) -> Option { - self.blocks.get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec()) + self.blocks.read().unwrap().get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec()) } fn block_body(&self, h: &H256) -> Option { - self.blocks.get(h).map(|r| { + self.blocks.read().unwrap().get(h).map(|r| { let mut stream = RlpStream::new_list(2); stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); @@ -64,30 +66,30 @@ impl BlockChainClient for TestBlockChainClient { } fn block(&self, h: &H256) -> Option { - self.blocks.get(h).map(|b| b.clone()) + self.blocks.read().unwrap().get(h).map(|b| b.clone()) } fn block_status(&self, h: &H256) -> BlockStatus { - match self.blocks.get(h) { + match self.blocks.read().unwrap().get(h) { Some(_) => BlockStatus::InChain, None => BlockStatus::Unknown } } fn block_header_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).and_then(|h| self.block_header(h)) + self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_header(h)) } fn block_body_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).and_then(|h| self.block_body(h)) + self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_body(h)) } fn block_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).map(|h| self.blocks.get(h).unwrap().clone()) + self.numbers.read().unwrap().get(&(n as usize)).map(|h| self.blocks.read().unwrap().get(h).unwrap().clone()) } fn block_status_at(&self, n: BlockNumber) -> BlockStatus { - if (n as usize) < self.blocks.len() { + if (n as usize) < self.blocks.read().unwrap().len() { BlockStatus::InChain } else { BlockStatus::Unknown @@ -110,14 +112,14 @@ impl BlockChainClient for TestBlockChainClient { None } - fn import_block(&mut self, b: Bytes) -> ImportResult { + fn import_block(&self, b: Bytes) -> ImportResult { 
let header = Rlp::new(&b).val_at::(0); let number: usize = header.number as usize; - if number > self.blocks.len() { - panic!("Unexpected block number. Expected {}, got {}", self.blocks.len(), number); + if number > self.blocks.read().unwrap().len() { + panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); } if number > 0 { - match self.blocks.get(&header.parent_hash) { + match self.blocks.read().unwrap().get(&header.parent_hash) { Some(parent) => { let parent = Rlp::new(parent).val_at::(0); if parent.number != (header.number - 1) { @@ -129,43 +131,44 @@ impl BlockChainClient for TestBlockChainClient { } } } - if number == self.numbers.len() { - self.difficulty = self.difficulty + header.difficulty; - self.last_hash = header.hash(); - self.blocks.insert(header.hash(), b); - self.numbers.insert(number, header.hash()); + if number == self.numbers.read().unwrap().len() { + *self.difficulty.write().unwrap().deref_mut() += header.difficulty; + mem::replace(self.last_hash.write().unwrap().deref_mut(), header.hash()); + self.blocks.write().unwrap().insert(header.hash(), b); + self.numbers.write().unwrap().insert(number, header.hash()); let mut parent_hash = header.parent_hash; if number > 0 { let mut n = number - 1; - while n > 0 && self.numbers[&n] != parent_hash { - *self.numbers.get_mut(&n).unwrap() = parent_hash.clone(); + while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { + *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); n -= 1; - parent_hash = Rlp::new(&self.blocks[&parent_hash]).val_at::(0).parent_hash; + parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; } } } else { - self.blocks.insert(header.hash(), b.to_vec()); + self.blocks.write().unwrap().insert(header.hash(), b.to_vec()); } Ok(()) } - fn queue_status(&self) -> BlockQueueStatus { - BlockQueueStatus { + fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { full: false, + queue_size: 0, } } - fn clear_queue(&mut self) { + fn clear_queue(&self) { } fn chain_info(&self) -> BlockChainInfo { BlockChainInfo { - total_difficulty: self.difficulty, - pending_total_difficulty: self.difficulty, + total_difficulty: *self.difficulty.read().unwrap(), + pending_total_difficulty: *self.difficulty.read().unwrap(), genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.clone(), - best_block_number: self.blocks.len() as BlockNumber - 1, + best_block_hash: self.last_hash.read().unwrap().clone(), + best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, } } } @@ -208,7 +211,7 @@ impl<'p> SyncIo for TestIo<'p> { Ok(()) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain<'a>(&'a self) -> &'a BlockChainClient { self.chain } } @@ -275,7 +278,7 @@ impl TestNet { None => {} } let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); + p.sync._maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); } } @@ -300,7 +303,7 @@ fn full_sync_two_peers() { net.peer_mut(2).chain.add_blocks(1000, false); net.sync(); assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks, net.peer(1).chain.blocks); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); } #[test] @@ -313,7 +316,7 @@ fn full_sync_empty_blocks() { } net.sync(); assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks, 
net.peer(1).chain.blocks); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); } #[test] @@ -329,9 +332,9 @@ fn forked_sync() { net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 net.peer_mut(2).chain.add_blocks(10, true); // peer 1 has the best chain of 601 blocks - let peer1_chain = net.peer(1).chain.numbers.clone(); + let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); - assert_eq!(net.peer(0).chain.numbers, peer1_chain); - assert_eq!(net.peer(1).chain.numbers, peer1_chain); - assert_eq!(net.peer(2).chain.numbers, peer1_chain); + assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain); } From bb4e31b22438756a4ff9b8c9a91dba5fede91374 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Fri, 22 Jan 2016 08:04:52 +0100 Subject: [PATCH 032/138] Marking test as heavy / ignored --- src/tests/state.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tests/state.rs b/src/tests/state.rs index 03a5e05e1..6b7561142 100644 --- a/src/tests/state.rs +++ b/src/tests/state.rs @@ -74,19 +74,19 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{StateTests_stBlockHashTest, "StateTests/stBlockHashTest"} declare_test!{StateTests_stCallCodes, "StateTests/stCallCodes"} declare_test!{ignore => StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} //<< Out of stack -declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} //<< FAIL - gas too high +declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} declare_test!{StateTests_stExample, "StateTests/stExample"} declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"} declare_test!{StateTests_stLogTests, "StateTests/stLogTests"} declare_test!{StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"} declare_test!{StateTests_stMemoryTest, "StateTests/stMemoryTest"} declare_test!{StateTests_stPreCompiledContracts, "StateTests/stPreCompiledContracts"} -declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long -declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack +declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long +declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack declare_test!{StateTests_stRefundTest, "StateTests/stRefundTest"} declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"} -declare_test!{ignore => StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Signal 11 -declare_test!{ignore => StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Signal 11 +declare_test!{ignore => StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Out of Stack +declare_test!{ignore => StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Out of stack declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"} declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"} declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"} From b00e4b9d95a348c6500a1d9a82762777dc4fbe32 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Fri, 22 Jan 2016 08:07:53 +0100 
Subject: [PATCH 033/138] Marking more tests as 'heavy' --- src/tests/state.rs | 4 ++-- src/tests/transaction.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/state.rs b/src/tests/state.rs index 6b7561142..d768d1bb8 100644 --- a/src/tests/state.rs +++ b/src/tests/state.rs @@ -78,8 +78,8 @@ declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} declare_test!{StateTests_stExample, "StateTests/stExample"} declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"} declare_test!{StateTests_stLogTests, "StateTests/stLogTests"} -declare_test!{StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"} -declare_test!{StateTests_stMemoryTest, "StateTests/stMemoryTest"} +declare_test!{heavy => StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"} +declare_test!{heavy => StateTests_stMemoryTest, "StateTests/stMemoryTest"} declare_test!{StateTests_stPreCompiledContracts, "StateTests/stPreCompiledContracts"} declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack diff --git a/src/tests/transaction.rs b/src/tests/transaction.rs index 967738c91..cd378c35f 100644 --- a/src/tests/transaction.rs +++ b/src/tests/transaction.rs @@ -70,7 +70,7 @@ declare_test!{TransactionTests/Homestead/ttWrongRLPTransaction} declare_test!{TransactionTests/RandomTests/tr201506052141PYTHON}*/ declare_test!{TransactionTests_ttTransactionTest, "TransactionTests/ttTransactionTest"} -declare_test!{TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} +declare_test!{heavy => TransactionTests_tt10mbDataField, "TransactionTests/tt10mbDataField"} declare_test!{TransactionTests_ttWrongRLPTransaction, "TransactionTests/ttWrongRLPTransaction"} declare_test!{TransactionTests_Homestead_ttTransactionTest, "TransactionTests/Homestead/ttTransactionTest"} declare_test!{heavy => TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} From 1970d79337a7f22ce5187107a3cad8d7f6426cc2 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Fri, 22 Jan 2016 08:09:16 +0100 Subject: [PATCH 034/138] Adding git hook --- hook.sh | 3 +++ 1 file changed, 3 insertions(+) create mode 100755 hook.sh diff --git a/hook.sh b/hook.sh new file mode 100755 index 000000000..a82f926b9 --- /dev/null +++ b/hook.sh @@ -0,0 +1,3 @@ +#!/bin/sh +echo "#!/bin/sh\ncargo test" >> ./.git/hooks/pre-push +chmod +x ./.git/hooks/pre-push From 9159d3fea0873b766329c77e10273044d0d4f427 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 14:03:42 +0100 Subject: [PATCH 035/138] Indent --- src/bin/client/main.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 147ea2be2..638ac8216 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -36,10 +36,7 @@ fn main() { let exit = Arc::new(Condvar::new()); let e = exit.clone(); - CtrlC::set_handler(move || { - e.notify_all(); - }); - + CtrlC::set_handler(move || { e.notify_all(); }); let mutex = Mutex::new(()); let _ = exit.wait(mutex.lock().unwrap()).unwrap(); } From b5f0a412657276b2dba90fa17c5a436925772f5f Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 15:58:52 +0100 Subject: [PATCH 036/138] Fixed tests and tweaked sync progress report --- src/bin/client/main.rs | 5 +++-- src/block_queue.rs | 10 +++++++--- src/sync/mod.rs | 2 +- src/sync/tests.rs | 6 ++++-- 4 
files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 638ac8216..5c79b7755 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -69,7 +69,7 @@ impl Informant { let sync_info = sync.status(); if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {} queued ···// {} ({}) bl {} ({}) ex ]", + println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {}+{} queued ···// {} ({}) bl {} ({}) ex ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, @@ -79,7 +79,8 @@ impl Informant { sync_info.num_active_peers, sync_info.num_peers, sync_info.blocks_received, - queue_info.queue_size, + queue_info.unverified_queue_size, + queue_info.verified_queue_size, cache_info.blocks, cache_info.blocks as isize - last_cache_info.blocks as isize, diff --git a/src/block_queue.rs b/src/block_queue.rs index c2cbb35b6..239c559c5 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -15,8 +15,10 @@ use service::*; pub struct BlockQueueInfo { /// Indicates that queue is full pub full: bool, - /// Number of queued blocks - pub queue_size: usize, + /// Number of queued blocks pending verification + pub unverified_queue_size: usize, + /// Number of verified queued blocks pending import + pub verified_queue_size: usize, } /// A queue of blocks. Sits between network or other I/O and the BlockChain. @@ -235,9 +237,11 @@ impl BlockQueue { /// Get queue status. pub fn queue_info(&self) -> BlockQueueInfo { + let verification = self.verification.lock().unwrap(); BlockQueueInfo { full: false, - queue_size: self.verification.lock().unwrap().unverified.len(), + verified_queue_size: verification.verified.len(), + unverified_queue_size: verification.unverified.len(), } } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 078100084..78a5d7613 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -17,7 +17,7 @@ /// fn main() { /// let mut service = NetworkService::start().unwrap(); /// let dir = env::temp_dir(); -/// let client = Arc::new(Client::new(ethereum::new_frontier(), &dir).unwrap()); +/// let client = Client::new(ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); /// EthSync::register(&mut service, client); /// } /// ``` diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 24b012b5b..7f8a1748b 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -131,7 +131,8 @@ impl BlockChainClient for TestBlockChainClient { } } } - if number == self.numbers.read().unwrap().len() { + let len = self.numbers.read().unwrap().len(); + if number == len { *self.difficulty.write().unwrap().deref_mut() += header.difficulty; mem::replace(self.last_hash.write().unwrap().deref_mut(), header.hash()); self.blocks.write().unwrap().insert(header.hash(), b); @@ -155,7 +156,8 @@ impl BlockChainClient for TestBlockChainClient { fn queue_info(&self) -> BlockQueueInfo { BlockQueueInfo { full: false, - queue_size: 0, + verified_queue_size: 0, + unverified_queue_size: 0, } } From e27d628e753a94924cb32e698fc9e2ef022fe0a7 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Sat, 23 Jan 2016 10:41:13 +0100 Subject: [PATCH 037/138] Changing delegatecall logic --- Cargo.toml | 2 +- src/evm/ext.rs | 16 +++----------- src/evm/interpreter.rs | 14 
++++++------ src/evm/tests.rs | 13 +++-------- src/externalities.rs | 49 +++++++++++------------------------------- src/tests/executive.rs | 20 +++-------------- 6 files changed, 30 insertions(+), 84 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 714af5693..d8c92e0aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" ctrlc = "1.0" -clippy = "*" # Always newest, since we use nightly +clippy = "0.0.37" # Always newest, since we use nightly [features] jit = ["evmjit"] diff --git a/src/evm/ext.rs b/src/evm/ext.rs index 7e2f0f47f..748bc89da 100644 --- a/src/evm/ext.rs +++ b/src/evm/ext.rs @@ -55,19 +55,9 @@ pub trait Ext { /// and true if subcall was successfull. fn call(&mut self, gas: &U256, - address: &Address, - value: &U256, - data: &[u8], - code_address: &Address, - output: &mut [u8]) -> MessageCallResult; - - /// Delegate Message call. - /// - /// Returns Err, if we run out of gas. - /// Otherwise returns call_result which contains gas left - /// and true if subcall was successfull. - fn delegatecall(&mut self, - gas: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option<&U256>, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult; diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 7f0db421d..276f2873b 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -587,7 +587,7 @@ impl Interpreter { // and we don't want to copy let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; let output = mem.writeable_slice(out_off, out_size); - ext.delegatecall(&call_gas, input, &code_address, output) + ext.call(&call_gas, ¶ms.sender, ¶ms.address, None, input, &code_address, output) }; return match call_result { @@ -609,11 +609,6 @@ impl Interpreter { let value = stack.pop_back(); - let address = match instruction == instructions::CALL { - true => &code_address, - false => ¶ms.address - }; - let in_off = stack.pop_back(); let in_size = stack.pop_back(); let out_off = stack.pop_back(); @@ -624,6 +619,11 @@ impl Interpreter { false => U256::zero() }; + let (sender_address, receive_address) = match instruction == instructions::CALL { + true => (¶ms.address, &code_address), + false => (¶ms.address, ¶ms.address) + }; + let can_call = ext.balance(¶ms.address) >= value && ext.depth() < ext.schedule().max_depth; if !can_call { @@ -636,7 +636,7 @@ impl Interpreter { // and we don't want to copy let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; let output = mem.writeable_slice(out_off, out_size); - ext.call(&call_gas, address, &value, input, &code_address, output) + ext.call(&call_gas, sender_address, receive_address, Some(&value), input, &code_address, output) }; return match call_result { diff --git a/src/evm/tests.rs b/src/evm/tests.rs index aaf082093..cf4262914 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -61,22 +61,15 @@ impl Ext for FakeExt { fn call(&mut self, _gas: &U256, - _address: &Address, - _value: &U256, + _sender_address: &Address, + _receive_address: &Address, + _value: Option<&U256>, _data: &[u8], _code_address: &Address, _output: &mut [u8]) -> MessageCallResult { unimplemented!(); } - fn delegatecall(&mut self, - _gas: &U256, - _data: &[u8], - _address: &Address, - _output: &mut [u8]) -> MessageCallResult { - unimplemented!(); - } - fn extcode(&self, address: &Address) -> Bytes { self.codes.get(address).unwrap_or(&Bytes::new()).clone() } diff 
--git a/src/externalities.rs b/src/externalities.rs index 293a60999..d8b5d6110 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -17,18 +17,16 @@ pub enum OutputPolicy<'a> { /// Transaction properties that externalities need to know about. pub struct OriginInfo { - sender: Address, - value: U256, address: Address, origin: Address, - gas_price: U256 + gas_price: U256, + value: U256 } impl OriginInfo { /// Populates origin info from action params. pub fn from(params: &ActionParams) -> Self { OriginInfo { - sender: params.sender.clone(), address: params.address.clone(), origin: params.origin.clone(), gas_price: params.gas_price.clone(), @@ -136,52 +134,31 @@ impl<'a> Ext for Externalities<'a> { } } - fn delegatecall(&mut self, - gas: &U256, - data: &[u8], - code_address: &Address, - output: &mut [u8]) -> MessageCallResult { - - let params = ActionParams { - code_address: code_address.clone(), - address: self.origin_info.address.clone(), - sender: self.origin_info.sender.clone(), - origin: self.origin_info.origin.clone(), - gas: *gas, - gas_price: self.origin_info.gas_price.clone(), - value: ActionValue::Apparent(self.origin_info.value.clone()), - code: self.state.code(code_address), - data: Some(data.to_vec()), - }; - - let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth); - - match ex.call(params, self.substate, BytesRef::Fixed(output)) { - Ok(gas_left) => MessageCallResult::Success(gas_left), - _ => MessageCallResult::Failed - } - } - fn call(&mut self, gas: &U256, - address: &Address, - value: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option<&U256>, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult { - let params = ActionParams { + let mut params = ActionParams { + sender: sender_address.clone(), + address: receive_address.clone(), + value: ActionValue::Apparent(self.origin_info.value.clone()), code_address: code_address.clone(), - address: address.clone(), - sender: self.origin_info.address.clone(), origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: ActionValue::Transfer(value.clone()), code: self.state.code(code_address), data: Some(data.to_vec()), }; + if let Some(value) = value { + params.value = ActionValue::Transfer(value.clone()); + } + let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth); match ex.call(params, self.substate, BytesRef::Fixed(output)) { diff --git a/src/tests/executive.rs b/src/tests/executive.rs index cfca30740..0604c9992 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -101,8 +101,9 @@ impl<'a> Ext for TestExt<'a> { fn call(&mut self, gas: &U256, + _sender_address: &Address, receive_address: &Address, - value: &U256, + value: Option<&U256>, data: &[u8], _code_address: &Address, _output: &mut [u8]) -> MessageCallResult { @@ -110,22 +111,7 @@ impl<'a> Ext for TestExt<'a> { data: data.to_vec(), destination: Some(receive_address.clone()), gas_limit: *gas, - value: *value - }); - MessageCallResult::Success(*gas) - } - - fn delegatecall(&mut self, - gas: &U256, - data: &[u8], - _code_address: &Address, - _output: &mut [u8]) -> MessageCallResult { - - self.callcreates.push(CallCreate { - data: data.to_vec(), - destination: None, - gas_limit: *gas, - value: U256::zero() + value: *value.unwrap() }); MessageCallResult::Success(*gas) } From 58bda2209c70783de13d5eee0c66078959bdbef5 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Sat, 23 Jan 2016 10:42:25 +0100 
Subject: [PATCH 038/138] Reverting clippy wildcard --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d8c92e0aa..714af5693 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" ctrlc = "1.0" -clippy = "0.0.37" # Always newest, since we use nightly +clippy = "*" # Always newest, since we use nightly [features] jit = ["evmjit"] From 3f705c452c2281bfba5658b9dcc32d17663e5ea0 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Sat, 23 Jan 2016 10:43:13 +0100 Subject: [PATCH 039/138] Removed wildcard from clippy version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 714af5693..b74ab4e0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" ctrlc = "1.0" -clippy = "*" # Always newest, since we use nightly +clippy = "0.0.37" [features] jit = ["evmjit"] From b059d32485712a63a8bd87fcae22f9c0bde945e2 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 23 Jan 2016 23:53:20 +0100 Subject: [PATCH 040/138] Argument parsing from CLI. Closes #179 --- Cargo.toml | 1 + res/ethereum/frontier.json | 5 +++++ src/bin/client/main.rs | 45 +++++++++++++++++++++++++++++++++---- src/lib.rs | 2 ++ src/service.rs | 9 ++++++-- src/spec.rs | 12 +++++++++- util/src/network/host.rs | 10 +-------- util/src/network/service.rs | 7 +++--- 8 files changed, 71 insertions(+), 20 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 04c4bf956..75f983253 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" +docopt = "0.6" [features] jit = ["evmjit"] diff --git a/res/ethereum/frontier.json b/res/ethereum/frontier.json index eaf0ef4c1..9cb456ce8 100644 --- a/res/ethereum/frontier.json +++ b/res/ethereum/frontier.json @@ -26,6 +26,11 @@ "gasLimit": "0x1388", "stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" }, + "nodes": [ + "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", + "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", + "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303" + ], "accounts": { "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "linear": { "base": 3000, "word": 0 } } }, "0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "linear": { "base": 60, "word": 12 } } }, diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 3335d8a72..0d0368480 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -1,6 +1,10 @@ +#![feature(plugin)] +//#![plugin(docopt_macros)] + +extern crate docopt; +extern crate rustc_serialize; extern crate ethcore_util as util; extern crate ethcore; -extern crate rustc_serialize; extern crate log; extern crate env_logger; @@ -14,8 +18,27 @@ use ethcore::service::ClientService; use ethcore::ethereum; use ethcore::blockchain::CacheSize; use ethcore::sync::*; +use docopt::Docopt; -fn setup_log() { +const USAGE: &'static str = " +Parity. 
Ethereum Client. + +Usage: + parity [options] + parity [options] ... + +Options: + -l --logging LOGGING Specify the logging level + -h --help Show this screen. +"; + +#[derive(Debug, RustcDecodable)] +struct Args { + arg_enode: Option>, + flag_logging: Option, +} + +fn setup_log(init: &Option) { let mut builder = LogBuilder::new(); builder.filter(None, LogLevelFilter::Info); @@ -23,14 +46,28 @@ fn setup_log() { builder.parse(&env::var("RUST_LOG").unwrap()); } + if let &Some(ref x) = init { + builder.parse(x); + } + builder.init().unwrap(); } fn main() { - setup_log(); + let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()); + + setup_log(&args.flag_logging); + let spec = ethereum::new_frontier(); - let mut service = ClientService::start(spec).unwrap(); + + let init_nodes = match &args.arg_enode { + &None => spec.nodes().clone(), + &Some(ref enodes) => enodes.clone(), + }; + + let mut service = ClientService::start(spec, &init_nodes).unwrap(); let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0, info: Default::default() }); + service.io().register_handler(io_handler).expect("Error registering IO handler"); loop { let mut cmd = String::new(); diff --git a/src/lib.rs b/src/lib.rs index a5b6c3dae..a25fa9338 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -89,6 +89,8 @@ extern crate evmjit; #[macro_use] extern crate ethcore_util as util; +// NOTE: Add doc parser exception for these pub declarations. + /// TODO [Gav Wood] Please document me pub mod common; /// TODO [Tomusdrw] Please document me diff --git a/src/service.rs b/src/service.rs index 30565b37a..07ff4873c 100644 --- a/src/service.rs +++ b/src/service.rs @@ -13,8 +13,8 @@ pub struct ClientService { impl ClientService { /// Start the service in a separate thread. - pub fn start(spec: Spec) -> Result { - let mut net_service = try!(NetworkService::start()); + pub fn start(spec: Spec, init_nodes: &Vec) -> Result { + let mut net_service = try!(NetworkService::start(init_nodes)); info!("Starting {}", net_service.host_info()); info!("Configured for {} using {} engine", spec.name, spec.engine_name); let mut dir = env::home_dir().unwrap(); @@ -33,6 +33,11 @@ impl ClientService { }) } + /// Get the network service. + pub fn add_node(&mut self, _enode: &str) { + unimplemented!(); + } + /// TODO [arkpar] Please document me pub fn io(&mut self) -> &mut IoService { self.net_service.io() diff --git a/src/spec.rs b/src/spec.rs index 24c0e4eda..bb47edacf 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -73,6 +73,9 @@ pub struct Spec { /// TODO [Gav Wood] Please document me pub engine_name: String, + /// Known nodes on the network in enode format. + pub nodes: Vec, + // Parameters concerning operation of the specific engine we're using. // Name -> RLP-encoded value /// TODO [Gav Wood] Please document me @@ -127,6 +130,9 @@ impl Spec { self.state_root_memo.read().unwrap().as_ref().unwrap().clone() } + /// Get the known knodes of the network in enode format. 
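The listing above has lost its angle brackets, so the docopt types read as `Option>`. A hedged reconstruction of that wiring, using the 2016-era docopt/rustc_serialize API the patch itself targets: docopt-rs fills `arg_enode` from the `<enode>...` positional and `flag_logging` from `--logging` by naming convention, and `decode()` populates the derived struct. The concrete element types (`Vec<String>`, `String`) are inferred from how `main` later uses the fields, not copied verbatim:

extern crate docopt;
extern crate rustc_serialize;

use docopt::Docopt;

const USAGE: &'static str = "
Parity. Ethereum Client.

Usage:
  parity [options]
  parity [options] <enode>...

Options:
  -l --logging LOGGING   Specify the logging level.
  -h --help              Show this screen.
";

#[derive(Debug, RustcDecodable)]
struct Args {
    arg_enode: Option<Vec<String>>,
    flag_logging: Option<String>,
}

fn parse_args() -> Args {
    Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit())
}
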
+ pub fn nodes(&self) -> &Vec { &self.nodes } + /// TODO [Gav Wood] Please document me pub fn genesis_header(&self) -> Header { Header { @@ -196,6 +202,10 @@ impl FromJson for Spec { } } + let nodes = if let Some(&Json::Array(ref ns)) = json.find("nodes") { + ns.iter().filter_map(|n| if let &Json::String(ref s) = n { Some(s.to_string()) } else {None}).collect() + } else { Vec::new() }; + let genesis = &json["genesis"];//.as_object().expect("No genesis object in JSON"); let (seal_fields, seal_rlp) = { @@ -212,12 +222,12 @@ impl FromJson for Spec { ) } }; - Spec { name: json.find("name").map(|j| j.as_string().unwrap()).unwrap_or("unknown").to_string(), engine_name: json["engineName"].as_string().unwrap().to_string(), engine_params: json_to_rlp_map(&json["params"]), + nodes: nodes, builtins: builtins, parent_hash: H256::from_str(&genesis["parentHash"].as_string().unwrap()[2..]).unwrap(), author: Address::from_str(&genesis["author"].as_string().unwrap()[2..]).unwrap(), diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 37b58f1f0..67a9c56db 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -259,7 +259,7 @@ impl Host where Message: Send { } } - fn add_node(&mut self, id: &str) { + pub fn add_node(&mut self, id: &str) { match Node::from_str(id) { Err(e) => { warn!("Could not add node: {:?}", e); }, Ok(n) => { @@ -560,14 +560,6 @@ impl IoHandler> for Host where Messa io.event_loop.timeout_ms(Token(NODETABLE_MAINTAIN), 7200).unwrap(); let port = self.info.config.listen_address.port(); self.info.listen_port = port; - - self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); -/* // GO bootnodes - self.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE - self.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR - self.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG - // ETH/DEV cpp-ethereum (poc-9.ethdev.com) - self.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");*/ } fn stream_hup<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 4c333b8af..c5083e981 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -13,9 +13,10 @@ pub struct NetworkService where Message: Send + 'static { impl NetworkService where Message: Send + 'static { /// Starts IO event loop - pub fn start() -> Result, UtilError> { + pub fn start(init_nodes: &Vec) -> Result, UtilError> { let mut io_service = try!(IoService::>::start()); - let host = Box::new(Host::new()); + let mut host = Box::new(Host::new()); + for n in init_nodes { host.add_node(&n); } let host_info = host.info.client_version.clone(); info!("NetworkService::start(): id={:?}", host.info.id()); try!(io_service.register_handler(host)); @@ -55,7 +56,5 @@ impl NetworkService where Message: Send + 'static { pub fn io(&mut self) -> &mut IoService> { &mut self.io_service } - - } From 
7528c725bbbdee2f9ae6b6fdb162fa58267a9d46 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 24 Jan 2016 00:10:55 +0100 Subject: [PATCH 041/138] Additional comment. --- src/bin/client/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 30f320e9d..0194a0a91 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -1,4 +1,5 @@ #![feature(plugin)] +// TODO: uncomment once this can be made to work. //#![plugin(docopt_macros)] extern crate docopt; From acbb50d70092392409d45bc704c4169527569dd6 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 18:13:59 +0100 Subject: [PATCH 042/138] Stream deregistration --- util/src/io/mod.rs | 2 ++ util/src/io/service.rs | 18 ++++++++++++++++++ util/src/network/connection.rs | 13 +++++++++++++ util/src/network/handshake.rs | 6 ++++++ util/src/network/host.rs | 24 ++++++++++++++++++++++-- util/src/network/session.rs | 5 +++++ 6 files changed, 66 insertions(+), 2 deletions(-) diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 48c02f6ee..1906e7438 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -74,6 +74,8 @@ pub trait IoHandler: Send + Sync where Message: Send + Sync + Clone + ' fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} /// Re-register a stream with the event loop fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} + /// Deregister a stream. Called whenstream is removed from event loop + fn deregister_stream(&self, _stream: StreamToken, _event_loop: &mut EventLoop>) {} } /// TODO [arkpar] Please document me diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 8a1653056..9a3187f8e 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -42,6 +42,10 @@ pub enum IoMessage where Message: Send + Clone + Sized { handler_id: HandlerId, token: StreamToken, }, + DeregisterStream { + handler_id: HandlerId, + token: StreamToken, + }, UpdateStreamRegistration { handler_id: HandlerId, token: StreamToken, @@ -83,6 +87,7 @@ impl IoContext where Message: Send + Clone + 'static { })); Ok(()) } + /// Register a new IO stream. pub fn register_stream(&self, token: StreamToken) -> Result<(), UtilError> { try!(self.channel.send_io(IoMessage::RegisterStream { @@ -92,6 +97,15 @@ impl IoContext where Message: Send + Clone + 'static { Ok(()) } + /// Deregister an IO stream. + pub fn deregister_stream(&self, token: StreamToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::DeregisterStream { + token: token, + handler_id: self.handler, + })); + Ok(()) + } + /// Reregister an IO stream. 
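A note on the shape of the API added above: `IoContext::deregister_stream` never touches the event loop directly, it only sends an `IoMessage::DeregisterStream` that the IO manager later hands to the owning handler. A stripped-down sketch of that single message hop, with plain `usize`/`mpsc` stand-ins for the util crate's generic tokens and channel:

use std::sync::mpsc::{SendError, Sender};

enum IoMessage {
    RegisterStream { handler_id: usize, token: usize },
    DeregisterStream { handler_id: usize, token: usize },
}

struct IoContext {
    handler: usize,
    channel: Sender<IoMessage>,
}

impl IoContext {
    // Only queue the request; the IO manager later routes it to the owning
    // handler, which is where the socket actually leaves the event loop.
    fn deregister_stream(&self, token: usize) -> Result<(), SendError<IoMessage>> {
        self.channel.send(IoMessage::DeregisterStream { handler_id: self.handler, token: token })
    }
}
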
pub fn update_registration(&self, token: StreamToken) -> Result<(), UtilError> { try!(self.channel.send_io(IoMessage::UpdateStreamRegistration { @@ -214,6 +228,10 @@ impl Handler for IoManager where Message: Send + Clone + Sync let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); handler.register_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); }, + IoMessage::DeregisterStream { handler_id, token } => { + let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); + handler.deregister_stream(token, event_loop); + }, IoMessage::UpdateStreamRegistration { handler_id, token } => { let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); handler.update_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index 33cafd708..c4c8d29c6 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -150,6 +150,13 @@ impl Connection { Err(e) }) } + + /// Delete connection registration. Should be called at the end of the IO handler. + pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> io::Result<()> { + trace!(target: "net", "connection deregister; token={:?}", self.token); + event_loop.deregister(&self.socket).ok(); // ignore errors here + Ok(()) + } } /// RLPx packet @@ -371,6 +378,12 @@ impl EncryptedConnection { try!(self.connection.update_socket(reg, event_loop)); Ok(()) } + + /// Delete connection registration. This should be called at the end of the event loop. + pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.deregister_socket(event_loop)); + Ok(()) + } } #[test] diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index ddeab17b7..acac77d04 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -134,6 +134,12 @@ impl Handshake { Ok(()) } + /// Delete registration + pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.deregister_socket(event_loop)); + Ok(()) + } + /// Parse, validate and confirm auth message fn read_auth(&mut self, host: &HostInfo, data: &[u8]) -> Result<(), UtilError> { trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 724f11ddf..559f69f49 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -285,7 +285,7 @@ impl Host where Message: Send + Sync + Clone { host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG // ETH/DEV cpp-ethereum (poc-9.ethdev.com) - host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"); + //host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"); host } @@ -395,7 +395,8 @@ impl Host where Message: Send + Sync + Clone { }; let nonce = self.info.write().unwrap().next_nonce(); - if 
self.connections.write().unwrap().insert_with(|token| { + let mut connections = self.connections.write().unwrap(); + if connections.insert_with(|token| { let mut handshake = Handshake::new(token, id, socket, &nonce).expect("Can't create handshake"); handshake.start(io, &self.info.read().unwrap(), true).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| { debug!(target: "net", "Handshake create error: {:?}", e); @@ -552,6 +553,7 @@ impl Host where Message: Send + Sync + Clone { _ => {}, } } + io.deregister_stream(token).expect("Error deregistering stream"); } for p in to_disconnect { let h = self.handlers.read().unwrap().get(p).unwrap().clone(); @@ -664,6 +666,24 @@ impl IoHandler> for Host where Messa } } + fn deregister_stream(&self, stream: StreamToken, event_loop: &mut EventLoop>>) { + match stream { + FIRST_CONNECTION ... LAST_CONNECTION => { + let mut connections = self.connections.write().unwrap(); + if let Some(connection) = connections.get(stream).cloned() { + match *connection.lock().unwrap().deref() { + ConnectionEntry::Handshake(ref h) => h.deregister_socket(event_loop).expect("Error deregistering socket"), + ConnectionEntry::Session(ref s) => s.deregister_socket(event_loop).expect("Error deregistering session socket"), + } + connections.remove(stream); + } + }, + NODETABLE_RECEIVE => event_loop.deregister(self.udp_socket.lock().unwrap().deref()).unwrap(), + TCP_ACCEPT => event_loop.deregister(self.tcp_listener.lock().unwrap().deref()).unwrap(), + _ => warn!("Unexpected stream deregistration") + } + } + fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>>) { match stream { FIRST_CONNECTION ... LAST_CONNECTION => { diff --git a/util/src/network/session.rs b/util/src/network/session.rs index 8f580f476..2817f008d 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -131,6 +131,11 @@ impl Session { self.connection.update_socket(reg, event_loop) } + /// Delete registration + pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> Result<(), UtilError> { + self.connection.deregister_socket(event_loop) + } + /// Send a protocol packet to peer. 
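For readers following the host changes in this commit: the failure path now only requests deregistration of the token, and the connection itself is removed (and its socket deregistered) later, in the `deregister_stream` callback, once the event loop has stopped polling it. A much-simplified stand-in for that two-phase teardown; the real code goes through `io.deregister_stream(token)` and a slab of `Arc<Mutex<ConnectionEntry>>` rather than the toy types below:

use std::collections::HashMap;

struct Socket; // placeholder for the mio stream

struct Host {
    connections: HashMap<usize, Socket>,
    pending_deregister: Vec<usize>,
}

impl Host {
    // Error path: only request deregistration, keep the entry alive for now.
    fn kill_connection(&mut self, token: usize) {
        self.pending_deregister.push(token);
    }

    // Invoked by the IO layer afterwards: safe to deregister and drop the socket.
    fn deregister_stream(&mut self, token: usize) {
        if let Some(_socket) = self.connections.remove(&token) {
            // event_loop.deregister(&socket) would run here before the drop
        }
    }
}
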
pub fn send_packet(&mut self, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), UtilError> { let mut i = 0usize; From cfb8671b004433ce9794c5f644d710fcdd9c9b6f Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 23 Jan 2016 02:36:58 +0100 Subject: [PATCH 043/138] Networking bugfixes --- res/ethereum/tests | 2 +- src/bin/client/main.rs | 8 +++++- src/client.rs | 10 +++---- src/service.rs | 4 +-- util/src/network/connection.rs | 8 +++--- util/src/network/host.rs | 49 +++++++++++++++++++++++----------- util/src/network/mod.rs | 5 ++-- util/src/network/service.rs | 6 ++--- 8 files changed, 56 insertions(+), 36 deletions(-) diff --git a/res/ethereum/tests b/res/ethereum/tests index dc86e6359..e838fd909 160000 --- a/res/ethereum/tests +++ b/res/ethereum/tests @@ -1 +1 @@ -Subproject commit dc86e6359675440aea59ddb48648a01c799925d8 +Subproject commit e838fd90998fc5502d0b7c9427a4c231f9a6953d diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 5c79b7755..e2892693c 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -30,7 +30,13 @@ fn setup_log() { fn main() { setup_log(); let spec = ethereum::new_frontier(); - let mut service = ClientService::start(spec).unwrap(); + let mut net_settings = NetworkConfiguration::new(); + let args: Vec<_> = env::args().collect(); + if args.len() == 2 { + net_settings.boot_nodes.push(args[1].trim_matches('\"').to_string()); + } + + let mut service = ClientService::start(spec, net_settings).unwrap(); let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: service.sync() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); diff --git a/src/client.rs b/src/client.rs index 6f47d0601..4461f3d7b 100644 --- a/src/client.rs +++ b/src/client.rs @@ -162,14 +162,10 @@ impl Client { let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap()); let engine = Arc::new(try!(spec.to_engine())); - { - let mut state_db = JournalDB::new_with_arc(db.clone()); - if engine.spec().ensure_db_good(&mut state_db) { - state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); - } + let mut state_db = JournalDB::new_with_arc(db.clone()); + if engine.spec().ensure_db_good(&mut state_db) { + state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } - let state_db = JournalDB::new_with_arc(db); - Ok(Arc::new(Client { chain: chain, engine: engine.clone(), diff --git a/src/service.rs b/src/service.rs index 6ff7d46be..ce2c30720 100644 --- a/src/service.rs +++ b/src/service.rs @@ -26,8 +26,8 @@ pub struct ClientService { impl ClientService { /// Start the service in a separate thread. 
- pub fn start(spec: Spec) -> Result { - let mut net_service = try!(NetworkService::start()); + pub fn start(spec: Spec, net_config: NetworkConfiguration) -> Result { + let mut net_service = try!(NetworkService::start(net_config)); info!("Starting {}", net_service.host_info()); info!("Configured for {} using {} engine", spec.name, spec.engine_name); let mut dir = env::home_dir().unwrap(); diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index c4c8d29c6..fb7bfb734 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -137,8 +137,8 @@ impl Connection { pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { trace!(target: "net", "connection register; token={:?}", reg); event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to register {:?}, {:?}", reg, e); - Err(e) + debug!("Failed to register {:?}, {:?}", reg, e); + Ok(()) }) } @@ -146,8 +146,8 @@ impl Connection { pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { trace!(target: "net", "connection reregister; token={:?}", reg); event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to reregister {:?}, {:?}", reg, e); - Err(e) + debug!("Failed to reregister {:?}, {:?}", reg, e); + Ok(()) }) } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 559f69f49..8fffdde69 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -28,22 +28,32 @@ const IDEAL_PEERS: u32 = 10; const MAINTENANCE_TIMEOUT: u64 = 1000; #[derive(Debug)] -struct NetworkConfiguration { - listen_address: SocketAddr, - public_address: SocketAddr, - nat_enabled: bool, - discovery_enabled: bool, - pin: bool, +/// Network service configuration +pub struct NetworkConfiguration { + /// IP address to listen for incoming connections + pub listen_address: SocketAddr, + /// IP address to advertise + pub public_address: SocketAddr, + /// Enable NAT configuration + pub nat_enabled: bool, + /// Enable discovery + pub discovery_enabled: bool, + /// Pin to boot nodes only + pub pin: bool, + /// List of initial node addresses + pub boot_nodes: Vec, } impl NetworkConfiguration { - fn new() -> NetworkConfiguration { + /// Create a new instance of default settings. 
+ pub fn new() -> NetworkConfiguration { NetworkConfiguration { listen_address: SocketAddr::from_str("0.0.0.0:30304").unwrap(), public_address: SocketAddr::from_str("0.0.0.0:30304").unwrap(), nat_enabled: true, discovery_enabled: true, pin: false, + boot_nodes: Vec::new(), } } } @@ -246,8 +256,8 @@ pub struct Host where Message: Send + Sync + Clone { } impl Host where Message: Send + Sync + Clone { - pub fn new() -> Host { - let config = NetworkConfiguration::new(); + /// Create a new instance + pub fn new(config: NetworkConfiguration) -> Host { let addr = config.listen_address; // Setup the server socket let tcp_listener = TcpListener::bind(&addr).unwrap(); @@ -279,13 +289,19 @@ impl Host where Message: Send + Sync + Clone { None => warn!("No public network interface"), */ - // self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); - // GO bootnodes - host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE - host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR - host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG + let boot_nodes = host.info.read().unwrap().config.boot_nodes.clone(); + if boot_nodes.is_empty() { + // GO bootnodes + host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE + host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR + host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG + } + else { + for n in boot_nodes { + host.add_node(&n); + } + } // ETH/DEV cpp-ethereum (poc-9.ethdev.com) - //host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"); host } @@ -517,7 +533,8 @@ impl Host where Message: Send + Sync + Clone { } fn start_session(&self, token: StreamToken, io: &IoContext>) { - self.connections.write().unwrap().replace_with(token, |c| { + let mut connections = self.connections.write().unwrap(); + connections.replace_with(token, |c| { match Arc::try_unwrap(c).ok().unwrap().into_inner().unwrap() { ConnectionEntry::Handshake(h) => { let session = Session::new(h, io, &self.info.read().unwrap()).expect("Session creation error"); diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index 0c734442d..c175ab0a2 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -40,7 +40,7 @@ /// } /// /// fn main () { -/// let mut service = NetworkService::::start().expect("Error creating network service"); +/// let mut service = NetworkService::::start(NetworkConfiguration::new()).expect("Error creating network service"); /// service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]); /// /// // Wait for quit condition @@ -71,6 +71,7 @@ pub use network::host::NetworkIoMessage; pub use 
network::host::NetworkIoMessage::User as UserMessage; /// TODO [arkpar] Please document me pub use network::error::NetworkError; +pub use network::host::NetworkConfiguration; use io::TimerToken; @@ -130,6 +131,6 @@ fn test_net_service() { } } - let mut service = NetworkService::::start().expect("Error creating network service"); + let mut service = NetworkService::::start(NetworkConfiguration::new()).expect("Error creating network service"); service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]).unwrap(); } diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 67d2b55e2..8c29a8042 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -1,6 +1,6 @@ use std::sync::*; use error::*; -use network::{NetworkProtocolHandler}; +use network::{NetworkProtocolHandler, NetworkConfiguration}; use network::error::{NetworkError}; use network::host::{Host, NetworkIoMessage, ProtocolId}; use io::*; @@ -14,9 +14,9 @@ pub struct NetworkService where Message: Send + Sync + Clone + 'static impl NetworkService where Message: Send + Sync + Clone + 'static { /// Starts IO event loop - pub fn start() -> Result, UtilError> { + pub fn start(config: NetworkConfiguration) -> Result, UtilError> { let mut io_service = try!(IoService::>::start()); - let host = Arc::new(Host::new()); + let host = Arc::new(Host::new(config)); let host_info = host.client_version(); info!("NetworkService::start(): id={:?}", host.client_id()); try!(io_service.register_handler(host)); From cbc4828eea9d1a795d9cd64bb3bd873749761f41 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 23 Jan 2016 18:44:45 +0100 Subject: [PATCH 044/138] Continue sync on new hashes --- src/sync/chain.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/sync/chain.rs b/src/sync/chain.rs index c55cb0a6e..aaba701c2 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -475,6 +475,9 @@ impl ChainSync { } } }; + if max_height != x!(0) { + self.sync_peer(io, peer_id, true); + } Ok(()) } From cd250d4959dfb423292192f1af11b92c5ca5ac8c Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 24 Jan 2016 18:53:54 +0100 Subject: [PATCH 045/138] Incoming connections; Tests --- cov.sh | 2 +- src/bin/client/main.rs | 2 +- util/src/network/connection.rs | 26 ++++++--- util/src/network/handshake.rs | 25 +++++--- util/src/network/host.rs | 53 +++++++++++++---- util/src/network/mod.rs | 56 ++---------------- util/src/network/service.rs | 9 +++ util/src/network/stats.rs | 51 ++++++++++++++++ util/src/network/tests.rs | 103 +++++++++++++++++++++++++++++++++ 9 files changed, 249 insertions(+), 78 deletions(-) create mode 100644 util/src/network/stats.rs create mode 100644 util/src/network/tests.rs diff --git a/cov.sh b/cov.sh index 9f2a87a47..371746a39 100755 --- a/cov.sh +++ b/cov.sh @@ -17,5 +17,5 @@ fi cargo test --no-run || exit $? mkdir -p target/coverage -kcov --exclude-pattern ~/.multirust --include-pattern src --verify target/coverage target/debug/ethcore* +kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1 --include-pattern src --verify target/coverage target/debug/ethcore* xdg-open target/coverage/index.html diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index e2892693c..ff81aae31 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -33,7 +33,7 @@ fn main() { let mut net_settings = NetworkConfiguration::new(); let args: Vec<_> = env::args().collect(); if args.len() == 2 { - net_settings.boot_nodes.push(args[1].trim_matches('\"').to_string()); + net_settings.boot_nodes = Some(vec! 
[args[1].trim_matches('\"').to_string()]); } let mut service = ClientService::start(spec, net_settings).unwrap(); diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index fb7bfb734..7ed8c3c18 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::collections::VecDeque; use mio::{Handler, Token, EventSet, EventLoop, PollOpt, TryRead, TryWrite}; use mio::tcp::*; @@ -10,6 +11,7 @@ use error::*; use io::{IoContext, StreamToken}; use network::error::NetworkError; use network::handshake::Handshake; +use network::stats::NetworkStats; use crypto; use rcrypto::blockmodes::*; use rcrypto::aessafe::*; @@ -34,6 +36,8 @@ pub struct Connection { send_queue: VecDeque>, /// Event flags this connection expects interest: EventSet, + /// Shared network staistics + stats: Arc, } /// Connection write status. @@ -47,7 +51,7 @@ pub enum WriteStatus { impl Connection { /// Create a new connection with given id and socket. - pub fn new(token: StreamToken, socket: TcpStream) -> Connection { + pub fn new(token: StreamToken, socket: TcpStream, stats: Arc) -> Connection { Connection { token: token, socket: socket, @@ -55,6 +59,7 @@ impl Connection { rec_buf: Bytes::new(), rec_size: 0, interest: EventSet::hup() | EventSet::readable(), + stats: stats, } } @@ -68,7 +73,6 @@ impl Connection { } /// Readable IO handler. Called when there is some data to be read. - //TODO: return a slice pub fn readable(&mut self) -> io::Result> { if self.rec_size == 0 || self.rec_buf.len() >= self.rec_size { warn!(target:"net", "Unexpected connection read"); @@ -77,9 +81,12 @@ impl Connection { // resolve "multiple applicable items in scope [E0034]" error let sock_ref = ::by_ref(&mut self.socket); match sock_ref.take(max as u64).try_read_buf(&mut self.rec_buf) { - Ok(Some(_)) if self.rec_buf.len() == self.rec_size => { - self.rec_size = 0; - Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))) + Ok(Some(size)) if size != 0 => { + self.stats.inc_recv(size); + if self.rec_size != 0 && self.rec_buf.len() == self.rec_size { + self.rec_size = 0; + Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new()))) + } else { Ok(None) } }, Ok(_) => Ok(None), Err(e) => Err(e), @@ -109,14 +116,17 @@ impl Connection { return Ok(WriteStatus::Complete) } match self.socket.try_write_buf(buf) { - Ok(_) if (buf.position() as usize) < send_size => { + Ok(Some(size)) if (buf.position() as usize) < send_size => { self.interest.insert(EventSet::writable()); + self.stats.inc_send(size); Ok(WriteStatus::Ongoing) }, - Ok(_) if (buf.position() as usize) == send_size => { + Ok(Some(size)) if (buf.position() as usize) == send_size => { + self.stats.inc_send(size); Ok(WriteStatus::Complete) }, - Ok(_) => { panic!("Wrote past buffer");}, + Ok(Some(_)) => { panic!("Wrote past buffer");}, + Ok(None) => Ok(WriteStatus::Ongoing), Err(e) => Err(e) } }.and_then(|r| { diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index acac77d04..9b835d5bd 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use mio::*; use mio::tcp::*; use hash::*; @@ -10,6 +11,7 @@ use network::host::{HostInfo}; use network::node::NodeId; use error::*; use network::error::NetworkError; +use network::stats::NetworkStats; use io::{IoContext, StreamToken}; #[derive(PartialEq, Eq, Debug)] @@ -54,10 +56,10 @@ const HANDSHAKE_TIMEOUT: u64 = 30000; impl Handshake { /// Create a new handshake object - pub fn new(token: 
StreamToken, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result { + pub fn new(token: StreamToken, id: Option<&NodeId>, socket: TcpStream, nonce: &H256, stats: Arc) -> Result { Ok(Handshake { - id: id.clone(), - connection: Connection::new(token, socket), + id: if let Some(id) = id { id.clone()} else { NodeId::new() }, + connection: Connection::new(token, socket, stats), originated: false, state: HandshakeState::New, ecdhe: try!(KeyPair::create()), @@ -143,29 +145,36 @@ impl Handshake { /// Parse, validate and confirm auth message fn read_auth(&mut self, host: &HostInfo, data: &[u8]) -> Result<(), UtilError> { trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); - assert!(data.len() == AUTH_PACKET_SIZE); + if data.len() != AUTH_PACKET_SIZE { + debug!(target:"net", "Wrong auth packet size"); + return Err(From::from(NetworkError::BadProtocol)); + } self.auth_cipher = data.to_vec(); let auth = try!(ecies::decrypt(host.secret(), data)); let (sig, rest) = auth.split_at(65); let (hepubk, rest) = rest.split_at(32); let (pubk, rest) = rest.split_at(64); let (nonce, _) = rest.split_at(32); - self.remote_public.clone_from_slice(pubk); + self.id.clone_from_slice(pubk); self.remote_nonce.clone_from_slice(nonce); - let shared = try!(ecdh::agree(host.secret(), &self.remote_public)); + let shared = try!(ecdh::agree(host.secret(), &self.id)); let signature = Signature::from_slice(sig); let spub = try!(ec::recover(&signature, &(&shared ^ &self.remote_nonce))); + self.remote_public = spub.clone(); if &spub.sha3()[..] != hepubk { trace!(target:"net", "Handshake hash mismath with {:?}", self.connection.socket.peer_addr()); return Err(From::from(NetworkError::Auth)); }; - self.write_ack() + Ok(()) } /// Parse and validate ack message fn read_ack(&mut self, host: &HostInfo, data: &[u8]) -> Result<(), UtilError> { trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); - assert!(data.len() == ACK_PACKET_SIZE); + if data.len() != ACK_PACKET_SIZE { + debug!(target:"net", "Wrong ack packet size"); + return Err(From::from(NetworkError::BadProtocol)); + } self.ack_cipher = data.to_vec(); let ack = try!(ecies::decrypt(host.secret(), data)); self.remote_public.clone_from_slice(&ack[0..64]); diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 8fffdde69..67338b83f 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -17,6 +17,7 @@ use error::*; use io::*; use network::NetworkProtocolHandler; use network::node::*; +use network::stats::NetworkStats; type Slab = ::slab::Slab; @@ -41,7 +42,9 @@ pub struct NetworkConfiguration { /// Pin to boot nodes only pub pin: bool, /// List of initial node addresses - pub boot_nodes: Vec, + pub boot_nodes: Option>, + /// Use provided node key instead of default + pub use_secret: Option, } impl NetworkConfiguration { @@ -53,9 +56,18 @@ impl NetworkConfiguration { nat_enabled: true, discovery_enabled: true, pin: false, - boot_nodes: Vec::new(), + boot_nodes: None, + use_secret: None, } } + + /// Create new default configuration with sepcified listen port. 
+ pub fn new_with_port(port: u16) -> NetworkConfiguration { + let mut config = NetworkConfiguration::new(); + config.listen_address = SocketAddr::from_str(&format!("0.0.0.0:{}", port)).unwrap(); + config.public_address = SocketAddr::from_str(&format!("0.0.0.0:{}", port)).unwrap(); + config + } } // Tokens @@ -253,6 +265,7 @@ pub struct Host where Message: Send + Sync + Clone { handlers: RwLock>>>, timers: RwLock>, timer_counter: RwLock, + stats: Arc, } impl Host where Message: Send + Sync + Clone { @@ -264,7 +277,7 @@ impl Host where Message: Send + Sync + Clone { let udp_socket = UdpSocket::bound(&addr).unwrap(); let mut host = Host:: { info: RwLock::new(HostInfo { - keys: KeyPair::create().unwrap(), + keys: if let Some(ref secret) = config.use_secret { KeyPair::from_secret(secret.clone()).unwrap() } else { KeyPair::create().unwrap() }, config: config, nonce: H256::random(), protocol_version: 4, @@ -279,6 +292,7 @@ impl Host where Message: Send + Sync + Clone { handlers: RwLock::new(HashMap::new()), timers: RwLock::new(HashMap::new()), timer_counter: RwLock::new(LAST_CONNECTION + 1), + stats: Arc::new(NetworkStats::default()), }; let port = host.info.read().unwrap().config.listen_address.port(); host.info.write().unwrap().deref_mut().listen_port = port; @@ -290,21 +304,24 @@ impl Host where Message: Send + Sync + Clone { */ let boot_nodes = host.info.read().unwrap().config.boot_nodes.clone(); - if boot_nodes.is_empty() { + if boot_nodes.is_none() { // GO bootnodes host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG } else { - for n in boot_nodes { + for n in boot_nodes.unwrap() { host.add_node(&n); } } - // ETH/DEV cpp-ethereum (poc-9.ethdev.com) host } + pub fn stats(&self) -> Arc { + self.stats.clone() + } + pub fn add_node(&mut self, id: &str) { match Node::from_str(id) { Err(e) => { warn!("Could not add node: {:?}", e); }, @@ -382,7 +399,6 @@ impl Host where Message: Send + Sync + Clone { } #[allow(single_match)] - #[allow(block_in_if_condition_stmt)] fn connect_peer(&self, id: &NodeId, io: &IoContext>) { if self.have_session(id) { @@ -409,12 +425,16 @@ impl Host where Message: Send + Sync + Clone { } } }; + self.create_connection(socket, Some(id), io); + } + #[allow(block_in_if_condition_stmt)] + fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext>) { let nonce = self.info.write().unwrap().next_nonce(); let mut connections = self.connections.write().unwrap(); if connections.insert_with(|token| { - let mut handshake = Handshake::new(token, id, socket, &nonce).expect("Can't create handshake"); - handshake.start(io, &self.info.read().unwrap(), true).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| { + let mut handshake = Handshake::new(token, id, socket, &nonce, self.stats.clone()).expect("Can't create handshake"); + handshake.start(io, &self.info.read().unwrap(), id.is_some()).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| { debug!(target: "net", "Handshake create error: {:?}", e); }); Arc::new(Mutex::new(ConnectionEntry::Handshake(handshake))) @@ 
-423,8 +443,20 @@ impl Host where Message: Send + Sync + Clone { } } - fn accept(&self, _io: &IoContext>) { + fn accept(&self, io: &IoContext>) { trace!(target: "net", "accept"); + loop { + let socket = match self.tcp_listener.lock().unwrap().accept() { + Ok(None) => break, + Ok(Some((sock, _addr))) => sock, + Err(e) => { + warn!("Error accepting connection: {:?}", e); + break + }, + }; + self.create_connection(socket, None, io); + } + io.update_registration(TCP_ACCEPT).expect("Error registering TCP listener"); } #[allow(single_match)] @@ -539,6 +571,7 @@ impl Host where Message: Send + Sync + Clone { ConnectionEntry::Handshake(h) => { let session = Session::new(h, io, &self.info.read().unwrap()).expect("Session creation error"); io.update_registration(token).expect("Error updating session registration"); + self.stats.inc_sessions(); Some(Arc::new(Mutex::new(ConnectionEntry::Session(session)))) }, _ => { None } // handshake expired diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index c175ab0a2..668cdc8b1 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -1,5 +1,4 @@ -/// Network and general IO module. -/// +/// Network and general IO module. /// Example usage for craeting a network service and adding an IO handler: /// /// ```rust @@ -56,22 +55,20 @@ mod discovery; mod service; mod error; mod node; +mod stats; + +#[cfg(test)] +mod tests; -/// TODO [arkpar] Please document me pub use network::host::PeerId; -/// TODO [arkpar] Please document me pub use network::host::PacketId; -/// TODO [arkpar] Please document me pub use network::host::NetworkContext; -/// TODO [arkpar] Please document me pub use network::service::NetworkService; -/// TODO [arkpar] Please document me pub use network::host::NetworkIoMessage; -/// TODO [arkpar] Please document me pub use network::host::NetworkIoMessage::User as UserMessage; -/// TODO [arkpar] Please document me pub use network::error::NetworkError; pub use network::host::NetworkConfiguration; +pub use network::stats::NetworkStats; use io::TimerToken; @@ -93,44 +90,3 @@ pub trait NetworkProtocolHandler: Sync + Send where Message: Send + Syn fn message(&self, _io: &NetworkContext, _message: &Message) {} } - -#[test] -fn test_net_service() { - - use std::sync::Arc; - struct MyHandler; - - #[derive(Clone)] - struct MyMessage { - data: u32 - } - - impl NetworkProtocolHandler for MyHandler { - fn initialize(&self, io: &NetworkContext) { - io.register_timer(0, 1000).unwrap(); - } - - fn read(&self, _io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); - } - - fn connected(&self, _io: &NetworkContext, peer: &PeerId) { - println!("Connected {}", peer); - } - - fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { - println!("Disconnected {}", peer); - } - - fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { - println!("Timeout {}", timer); - } - - fn message(&self, _io: &NetworkContext, message: &MyMessage) { - println!("Message {}", message.data); - } - } - - let mut service = NetworkService::::start(NetworkConfiguration::new()).expect("Error creating network service"); - service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]).unwrap(); -} diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 8c29a8042..cbf400872 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -3,6 +3,7 @@ use error::*; use network::{NetworkProtocolHandler, NetworkConfiguration}; use 
network::error::{NetworkError}; use network::host::{Host, NetworkIoMessage, ProtocolId}; +use network::stats::{NetworkStats}; use io::*; /// IO Service with networking @@ -10,6 +11,7 @@ use io::*; pub struct NetworkService where Message: Send + Sync + Clone + 'static { io_service: IoService>, host_info: String, + stats: Arc } impl NetworkService where Message: Send + Sync + Clone + 'static { @@ -17,12 +19,14 @@ impl NetworkService where Message: Send + Sync + Clone + 'stat pub fn start(config: NetworkConfiguration) -> Result, UtilError> { let mut io_service = try!(IoService::>::start()); let host = Arc::new(Host::new(config)); + let stats = host.stats().clone(); let host_info = host.client_version(); info!("NetworkService::start(): id={:?}", host.client_id()); try!(io_service.register_handler(host)); Ok(NetworkService { io_service: io_service, host_info: host_info, + stats: stats, }) } @@ -45,5 +49,10 @@ impl NetworkService where Message: Send + Sync + Clone + 'stat pub fn io(&mut self) -> &mut IoService> { &mut self.io_service } + + /// Returns underlying io service. + pub fn stats(&self) -> &NetworkStats { + &self.stats + } } diff --git a/util/src/network/stats.rs b/util/src/network/stats.rs new file mode 100644 index 000000000..02d904985 --- /dev/null +++ b/util/src/network/stats.rs @@ -0,0 +1,51 @@ +//! Network Statistics +use std::sync::atomic::*; + +/// Network statistics structure +#[derive(Default, Debug)] +pub struct NetworkStats { + /// Bytes received + recv: AtomicUsize, + /// Bytes sent + send: AtomicUsize, + /// Total number of sessions created + sessions: AtomicUsize, +} + +impl NetworkStats { + /// Increase bytes received. + #[inline] + pub fn inc_recv(&self, size: usize) { + self.recv.fetch_add(size, Ordering::Relaxed); + } + + /// Increase bytes sent. + #[inline] + pub fn inc_send(&self, size: usize) { + self.send.fetch_add(size, Ordering::Relaxed); + } + + /// Increase number of sessions. + #[inline] + pub fn inc_sessions(&self) { + self.sessions.fetch_add(1, Ordering::Relaxed); + } + + /// Get bytes sent. + #[inline] + pub fn send(&self) -> usize { + self.send.load(Ordering::Relaxed) + } + + /// Get bytes received. + #[inline] + pub fn recv(&self) -> usize { + self.recv.load(Ordering::Relaxed) + } + + /// Get total number of sessions created. + #[inline] + pub fn sessions(&self) -> usize { + self.sessions.load(Ordering::Relaxed) + } +} diff --git a/util/src/network/tests.rs b/util/src/network/tests.rs new file mode 100644 index 000000000..7b0870532 --- /dev/null +++ b/util/src/network/tests.rs @@ -0,0 +1,103 @@ +use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; +use std::thread; +use std::time::*; +use common::*; +use network::*; +use io::TimerToken; +use crypto::KeyPair; + +pub struct TestProtocol { + pub packet: Mutex, + pub got_timeout: AtomicBool, +} + +impl Default for TestProtocol { + fn default() -> Self { + TestProtocol { + packet: Mutex::new(Vec::new()), + got_timeout: AtomicBool::new(false), + } + } +} + +#[derive(Clone)] +pub struct TestProtocolMessage { + payload: u32, +} + +impl TestProtocol { + /// Creates and register protocol with the network service + pub fn register(service: &mut NetworkService) -> Arc { + let handler = Arc::new(TestProtocol::default()); + service.register_protocol(handler.clone(), "test", &[42u8, 43u8]).expect("Error registering test protocol handler"); + handler + } + + pub fn got_packet(&self) -> bool { + self.packet.lock().unwrap().deref()[..] == b"hello"[..] 
+ } + + pub fn got_timeout(&self) -> bool { + self.got_timeout.load(AtomicOrdering::Relaxed) + } +} + +impl NetworkProtocolHandler for TestProtocol { + fn initialize(&self, io: &NetworkContext) { + io.register_timer(0, 10).unwrap(); + } + + fn read(&self, _io: &NetworkContext, _peer: &PeerId, packet_id: u8, data: &[u8]) { + assert_eq!(packet_id, 33); + self.packet.lock().unwrap().extend(data); + } + + fn connected(&self, io: &NetworkContext, _peer: &PeerId) { + io.respond(33, "hello".to_owned().into_bytes()).unwrap(); + } + + fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { + } + + /// Timer function called after a timeout created with `NetworkContext::timeout`. + fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { + assert_eq!(timer, 0); + self.got_timeout.store(true, AtomicOrdering::Relaxed); + } +} + + +#[test] +fn test_net_service() { + let mut service = NetworkService::::start(NetworkConfiguration::new()).expect("Error creating network service"); + service.register_protocol(Arc::new(TestProtocol::default()), "myproto", &[1u8]).unwrap(); +} + +#[test] +fn test_net_connect() { + let key1 = KeyPair::create().unwrap(); + let mut config1 = NetworkConfiguration::new_with_port(30344); + config1.use_secret = Some(key1.secret().clone()); + config1.boot_nodes = Some(vec![ ]); + let mut config2 = NetworkConfiguration::new_with_port(30345); + config2.boot_nodes = Some(vec![ format!("enode://{}@127.0.0.1:30344", key1.public().hex()) ]); + let mut service1 = NetworkService::::start(config1).unwrap(); + let mut service2 = NetworkService::::start(config2).unwrap(); + let handler1 = TestProtocol::register(&mut service1); + let handler2 = TestProtocol::register(&mut service2); + while !handler1.got_packet() && !handler2.got_packet() { + thread::sleep(Duration::from_millis(50)); + } + assert!(service1.stats().sessions() >= 1); + assert!(service2.stats().sessions() >= 1); +} + +#[test] +fn test_net_timeout() { + let config = NetworkConfiguration::new_with_port(30346); + let mut service = NetworkService::::start(config).unwrap(); + let handler = TestProtocol::register(&mut service); + while !handler.got_timeout() { + thread::sleep(Duration::from_millis(50)); + } +} From 018abc9dcdf610dc7d88e50a30af526492a19179 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 11:54:15 +0100 Subject: [PATCH 046/138] Autogenerate the Args from the docopt macro. --- Cargo.toml | 1 + res/ethereum/tests | 2 +- src/bin/client/main.rs | 29 +++++++++-------------------- 3 files changed, 11 insertions(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 82658c6ac..54b1b406e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" docopt = "0.6" +docopt_macros = "0.6" ctrlc = "1.0" clippy = "0.0.37" diff --git a/res/ethereum/tests b/res/ethereum/tests index e838fd909..dc86e6359 160000 --- a/res/ethereum/tests +++ b/res/ethereum/tests @@ -1 +1 @@ -Subproject commit e838fd90998fc5502d0b7c9427a4c231f9a6953d +Subproject commit dc86e6359675440aea59ddb48648a01c799925d8 diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 31bb79d8e..45f45daf7 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -1,7 +1,5 @@ #![feature(plugin)] -// TODO: uncomment once this can be made to work. 
-//#![plugin(docopt_macros)] - +#![plugin(docopt_macros)] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -20,9 +18,8 @@ use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethcore::blockchain::CacheSize; use ethcore::sync::EthSync; -use docopt::Docopt; -const USAGE: &'static str = " +docopt!(Args derive Debug, " Parity. Ethereum Client. Usage: @@ -32,15 +29,9 @@ Usage: Options: -l --logging LOGGING Specify the logging level -h --help Show this screen. -"; +"); -#[derive(Debug, RustcDecodable)] -struct Args { - arg_enode: Option>, - flag_logging: Option, -} - -fn setup_log(init: &Option) { +fn setup_log(init: &String) { let mut builder = LogBuilder::new(); builder.filter(None, LogLevelFilter::Info); @@ -48,22 +39,20 @@ fn setup_log(init: &Option) { builder.parse(&env::var("RUST_LOG").unwrap()); } - if let &Some(ref x) = init { - builder.parse(x); - } + builder.parse(init); builder.init().unwrap(); } fn main() { - let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit()); + let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit()); setup_log(&args.flag_logging); let spec = ethereum::new_frontier(); - let init_nodes = match &args.arg_enode { - &None => spec.nodes().clone(), - &Some(ref enodes) => enodes.clone(), + let init_nodes = match args.arg_enode.len() { + 0 => spec.nodes().clone(), + _ => args.arg_enode.clone(), }; let mut net_settings = NetworkConfiguration::new(); net_settings.boot_nodes = init_nodes; From eed88df0d4404bcd7dd3e44d7e70619aa31d4e5e Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 25 Jan 2016 13:22:29 +0100 Subject: [PATCH 047/138] Fixed tests compilation --- util/src/network/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/network/tests.rs b/util/src/network/tests.rs index 7b0870532..06966abb5 100644 --- a/util/src/network/tests.rs +++ b/util/src/network/tests.rs @@ -78,9 +78,9 @@ fn test_net_connect() { let key1 = KeyPair::create().unwrap(); let mut config1 = NetworkConfiguration::new_with_port(30344); config1.use_secret = Some(key1.secret().clone()); - config1.boot_nodes = Some(vec![ ]); + config1.boot_nodes = vec![ ]; let mut config2 = NetworkConfiguration::new_with_port(30345); - config2.boot_nodes = Some(vec![ format!("enode://{}@127.0.0.1:30344", key1.public().hex()) ]); + config2.boot_nodes = vec![ format!("enode://{}@127.0.0.1:30344", key1.public().hex()) ]; let mut service1 = NetworkService::::start(config1).unwrap(); let mut service2 = NetworkService::::start(config2).unwrap(); let handler1 = TestProtocol::register(&mut service1); From b2050fa639f580a2fdff56579ab28aecc417c5bd Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 16:23:05 +0400 Subject: [PATCH 048/138] untrusted rlp data length check --- util/src/rlp/rlperrors.rs | 2 ++ util/src/rlp/tests.rs | 13 ++++++++++++- util/src/rlp/untrusted_rlp.rs | 7 ++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/util/src/rlp/rlperrors.rs b/util/src/rlp/rlperrors.rs index 97adbced1..18ca928ec 100644 --- a/util/src/rlp/rlperrors.rs +++ b/util/src/rlp/rlperrors.rs @@ -21,6 +21,8 @@ pub enum DecoderError { RlpListLenWithZeroPrefix, /// TODO [debris] Please document me RlpInvalidIndirection, + /// Returned when declared length is inconsistent with data specified after + RlpInconsistentLengthAndData } impl StdError for DecoderError { diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index f33cec177..e08901d67 100644 --- 
a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -4,7 +4,7 @@ use self::json_tests::rlp as rlptest; use std::{fmt, cmp}; use std::str::FromStr; use rlp; -use rlp::{UntrustedRlp, RlpStream, View, Stream}; +use rlp::{UntrustedRlp, RlpStream, View, Stream, DecoderError}; use uint::U256; #[test] @@ -351,3 +351,14 @@ fn test_decoding_array() { assert_eq!(arr[0], 5); assert_eq!(arr[1], 2); } + +#[test] +fn test_rlp_data_length_check() +{ + let data = vec![0x84, b'c', b'a', b't']; + let rlp = UntrustedRlp::new(&data); + + let as_val: Result = rlp.as_val(); + assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); +} + diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 768d058c1..58ab2de60 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -331,10 +331,15 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0...0x7f) => Ok(try!(f(&[l]))), // 0-55 bytes Some(l @ 0x80...0xb7) => { - let d = &bytes[1..(1 + l as usize - 0x80)]; + let last_index_of = 1 + l as usize - 0x80; + if bytes.len() < last_index_of { + return Err(DecoderError::RlpInconsistentLengthAndData); + } + let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { return Err(DecoderError::RlpInvalidIndirection); } + Ok(try!(f(d))) }, // longer than 55 bytes From e61d1f810ee6d3b87c2df91521272f6197a144d0 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 25 Jan 2016 13:39:15 +0100 Subject: [PATCH 049/138] U256<->H256 conversion --- util/src/hash.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/util/src/hash.rs b/util/src/hash.rs index 252877a24..0e4139f3c 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -469,6 +469,18 @@ impl<'_> From<&'_ U256> for H256 { } } +impl From for U256 { + fn from(value: H256) -> U256 { + U256::from(value.bytes()) + } +} + +impl<'_> From<&'_ H256> for U256 { + fn from(value: &'_ H256) -> U256 { + U256::from(value.bytes()) + } +} + impl From for Address { fn from(value: H256) -> Address { unsafe { @@ -562,6 +574,7 @@ pub static ZERO_H256: H256 = H256([0x00; 32]); #[cfg(test)] mod tests { use hash::*; + use uint::*; use std::str::FromStr; #[test] @@ -635,5 +648,18 @@ mod tests { // too short. 
assert_eq!(H64::from(0), H64::from("0x34567890abcdef")); } + + #[test] + fn from_and_to_u256() { + let u: U256 = x!(0x123456789abcdef0u64); + let h = H256::from(u); + assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0")); + let h_ref = H256::from(&u); + assert_eq!(h, h_ref); + let r_ref: U256 = From::from(&h); + assert_eq!(r_ref, u); + let r: U256 = From::from(h); + assert_eq!(r, u); + } } From 45059d5119691d11fa155019f8cb02ce140e2af6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 16:40:59 +0400 Subject: [PATCH 050/138] untrusted rlp data length check --- .gitignore | 3 ++- util/src/rlp/tests.rs | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index e3f377db9..2cdc945d3 100644 --- a/.gitignore +++ b/.gitignore @@ -23,4 +23,5 @@ Cargo.lock /json-tests/target/ - +# jetbrains ide stuff +.idea \ No newline at end of file diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index e08901d67..e86cb8ae6 100644 --- a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -360,5 +360,4 @@ fn test_rlp_data_length_check() let as_val: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); -} - +} \ No newline at end of file From 48d1760b7f598c806d6169891eee30fbf13c94d0 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 16:54:52 +0400 Subject: [PATCH 051/138] lost spaces --- util/src/rlp/untrusted_rlp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 58ab2de60..c0cc89df5 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -333,7 +333,7 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0x80...0xb7) => { let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData); } let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { From e592a185ef02e7ccdef1efe062f5a85ac730479a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 16:57:41 +0400 Subject: [PATCH 052/138] lost spaces again --- util/src/rlp/untrusted_rlp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index c0cc89df5..7f32ff369 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -333,7 +333,7 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0x80...0xb7) => { let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData); } let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { From 01ea703783a69575ae40e7abf8fbbf9ad3f36a4b Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 17:27:11 +0400 Subject: [PATCH 053/138] long length checks & indentation --- util/src/rlp/tests.rs | 14 ++++++++++++++ util/src/rlp/untrusted_rlp.rs | 12 ++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index e86cb8ae6..737e3ce28 100644 --- a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -358,6 +358,20 @@ fn test_rlp_data_length_check() let data = vec![0x84, b'c', b'a', b't']; let rlp = UntrustedRlp::new(&data); + let as_val: Result = rlp.as_val(); + 
assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); +} + +#[test] +fn test_rlp_long_data_length_check() +{ + let mut data: Vec = vec![0xb8, 255]; + for _ in 0..253 { + data.push(b'c'); + } + + let rlp = UntrustedRlp::new(&data); + let as_val: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); } \ No newline at end of file diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 7f32ff369..af0fd6ff7 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -333,7 +333,7 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0x80...0xb7) => { let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData); } let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { @@ -346,8 +346,16 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0xb8...0xbf) => { let len_of_len = l as usize - 0xb7; let begin_of_value = 1 as usize + len_of_len; + if bytes.len() < begin_of_value { + return Err(DecoderError::RlpInconsistentLengthAndData); + } let len = try!(usize::from_bytes(&bytes[1..begin_of_value])); - Ok(try!(f(&bytes[begin_of_value..begin_of_value + len]))) + + let last_index_of_value = begin_of_value + len; + if bytes.len() < last_index_of_value { + return Err(DecoderError::RlpInconsistentLengthAndData); + } + Ok(try!(f(&bytes[begin_of_value..last_index_of_value]))) } // we are reading value, not a list! _ => Err(DecoderError::RlpExpectedToBeData) From f4ab64fd6bcc7f045f52313dcba3f567013daa0f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 18:47:13 +0400 Subject: [PATCH 054/138] happy path test --- util/src/rlp/tests.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index 737e3ce28..d522fa80b 100644 --- a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -374,4 +374,18 @@ fn test_rlp_long_data_length_check() let as_val: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); +} + +#[test] +fn test_the_exact_long_string() +{ + let mut data: Vec = vec![0xb8, 255]; + for _ in 0..255 { + data.push(b'c'); + } + + let rlp = UntrustedRlp::new(&data); + + let as_val: Result = rlp.as_val(); + assert!(as_val.is_ok()); } \ No newline at end of file From 95e96a653f588caa4965c0951168be303b16228b Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 25 Jan 2016 19:13:36 +0400 Subject: [PATCH 055/138] 2 bytes length check --- util/src/rlp/tests.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/util/src/rlp/tests.rs b/util/src/rlp/tests.rs index d522fa80b..84c00e633 100644 --- a/util/src/rlp/tests.rs +++ b/util/src/rlp/tests.rs @@ -388,4 +388,19 @@ fn test_the_exact_long_string() let as_val: Result = rlp.as_val(); assert!(as_val.is_ok()); -} \ No newline at end of file +} + +#[test] +fn test_rlp_2bytes_data_length_check() +{ + let mut data: Vec = vec![0xb9, 2, 255]; // 512+255 + for _ in 0..700 { + data.push(b'c'); + } + + let rlp = UntrustedRlp::new(&data); + + let as_val: Result = rlp.as_val(); + assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val); +} + From f38b736c91499da04fbe6b44345181bf89ed63d8 Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 25 Jan 2016 17:45:26 +0100 Subject: [PATCH 056/138] debugging rpc... 
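The eth and net RPC delegates were wired to the wrong handlers: "eth_getFilterChanges" dispatched to `EthFilter::new_block_filter`, and the net trait registered a "peer_count" method backed by `Net::version` while never exposing "net_peerCount". This patch points each RPC name at its intended method, implements `net_version` (returning 63), comments out garbage collection in `Client::tick` while debugging, and makes `filter_changes` answer with an array holding the current best block hash rather than a bare string. A minimal sketch of the corrected result shape, assuming the same jsonrpc `Value` type used by the handlers below (illustrative only, not part of the diff):

    // Illustrative only: what the fixed eth_getFilterChanges handler returns.
    // `best` is the hex-encoded best block hash taken from chain_info().
    fn filter_changes_result(best: String) -> Value {
        // a one-element JSON array of hashes, not a bare string as before
        Value::Array(vec![Value::String(best)])
    }
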
--- src/bin/client/rpc/impls/eth.rs | 3 ++- src/bin/client/rpc/impls/net.rs | 4 ++++ src/bin/client/rpc/traits/eth.rs | 2 +- src/bin/client/rpc/traits/net.rs | 2 +- src/client.rs | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/bin/client/rpc/impls/eth.rs b/src/bin/client/rpc/impls/eth.rs index d960114ed..821eacd07 100644 --- a/src/bin/client/rpc/impls/eth.rs +++ b/src/bin/client/rpc/impls/eth.rs @@ -83,6 +83,7 @@ impl EthFilter for EthFilterClient { } fn filter_changes(&self, _: Params) -> Result { - Ok(Value::String(self.client.chain_info().best_block_hash.to_hex())) + println!("filter changes: {:?}", self.client.chain_info().best_block_hash.to_hex()); + Ok(Value::Array(vec![Value::String(self.client.chain_info().best_block_hash.to_hex())])) } } diff --git a/src/bin/client/rpc/impls/net.rs b/src/bin/client/rpc/impls/net.rs index 6e528d156..f0109429c 100644 --- a/src/bin/client/rpc/impls/net.rs +++ b/src/bin/client/rpc/impls/net.rs @@ -9,6 +9,10 @@ impl NetClient { } impl Net for NetClient { + fn version(&self, _: Params) -> Result { + Ok(Value::U64(63)) + } + fn peer_count(&self, _params: Params) -> Result { Ok(Value::U64(0)) } diff --git a/src/bin/client/rpc/traits/eth.rs b/src/bin/client/rpc/traits/eth.rs index dfc72e89a..856111444 100644 --- a/src/bin/client/rpc/traits/eth.rs +++ b/src/bin/client/rpc/traits/eth.rs @@ -59,7 +59,7 @@ pub trait EthFilter: Sized + Send + Sync + 'static { let mut delegate = IoDelegate::new(Arc::new(self)); delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); - delegate.add_method("eth_getFilterChanges", EthFilter::new_block_filter); + delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes); delegate } } diff --git a/src/bin/client/rpc/traits/net.rs b/src/bin/client/rpc/traits/net.rs index 7cb7f6bee..63c64edb3 100644 --- a/src/bin/client/rpc/traits/net.rs +++ b/src/bin/client/rpc/traits/net.rs @@ -13,8 +13,8 @@ pub trait Net: Sized + Send + Sync + 'static { /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("peer_count", Net::version); delegate.add_method("net_version", Net::version); + delegate.add_method("net_peerCount", Net::peer_count); delegate } } diff --git a/src/client.rs b/src/client.rs index 4461f3d7b..01da143e5 100644 --- a/src/client.rs +++ b/src/client.rs @@ -269,7 +269,7 @@ impl Client { /// Tick the client. pub fn tick(&self) { - self.chain.read().unwrap().collect_garbage(false); + //self.chain.read().unwrap().collect_garbage(false); } } From 499da19d825d586ac3e82a97d8ff2304333cb726 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 18:56:36 +0100 Subject: [PATCH 057/138] Initial draft of blockchain tests. 
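Adds src/tests/chain.rs, a first harness for the standard BlockchainTests JSON fixtures: each test builds a frontier-like Spec, rewrites its genesis header with the new `Spec::overwrite_genesis`, seeds the pre-state via `PodState::from_json`, then imports every block RLP through a temporary `Client` and drains the queue before reading back the chain state. In support of this, `Client` gains `flush_queue`, `BlockQueueInfo` gains `total_queue_size`, and the ad-hoc `GenesisAccount` map in `Spec` is replaced by `PodState` (with `new`, `from` and `root`). A minimal sketch of the import flow the harness drives, assuming `spec`, `dir` and the decoded `blocks` are prepared as in `do_json_test` (illustrative only, not part of the diff):

    // Illustrative only: queue the raw block RLPs, wait for verification, then
    // move the verified blocks into the chain and inspect the best hash.
    let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap();
    for b in blocks { client.import_block(b).unwrap(); }
    client.flush_queue();
    client.import_verified_blocks(&IoChannel::disconnected());
    let best_hash = client.chain_info().best_block_hash;   // logged by the test for now
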
--- src/block_queue.rs | 7 +++++ src/blockchain.rs | 8 ----- src/client.rs | 22 +++++++++---- src/pod_state.rs | 12 ++++++-- src/spec.rs | 73 ++++++++++++++++++++++---------------------- src/state.rs | 2 +- src/state_diff.rs | 14 ++++----- src/tests/chain.rs | 49 +++++++++++++++++++++++++++++ src/tests/mod.rs | 1 + util/src/standard.rs | 9 +++--- 10 files changed, 133 insertions(+), 64 deletions(-) create mode 100644 src/tests/chain.rs diff --git a/src/block_queue.rs b/src/block_queue.rs index 239c559c5..36539bfff 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -21,6 +21,11 @@ pub struct BlockQueueInfo { pub verified_queue_size: usize, } +impl BlockQueueInfo { + /// The total size of the queues. + pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size } +} + /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. pub struct BlockQueue { @@ -99,6 +104,7 @@ impl BlockQueue { fn verify(verification: Arc>, engine: Arc>, wait: Arc, ready: Arc, deleting: Arc) { while !deleting.load(AtomicOrdering::Relaxed) { + { let mut lock = verification.lock().unwrap(); while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { @@ -139,6 +145,7 @@ impl BlockQueue { }, Err(err) => { let mut v = verification.lock().unwrap(); + flushln!("Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); v.bad.insert(block_hash.clone()); v.verifying.retain(|e| e.hash != block_hash); diff --git a/src/blockchain.rs b/src/blockchain.rs index 39390de97..3bd31688a 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -283,13 +283,6 @@ impl BlockChain { bc } - /// Ensure that the best block does indeed have a state_root in the state DB. - /// If it doesn't, then rewind down until we find one that does and delete data to ensure that - /// later blocks will be reimported. - pub fn ensure_good(&mut self, _state: &JournalDB) { - unimplemented!(); - } - /// Returns a tree route between `from` and `to`, which is a tuple of: /// /// - a vector of hashes of all blocks, ordered from `from` to `to`. @@ -392,7 +385,6 @@ impl BlockChain { } } - /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. diff --git a/src/client.rs b/src/client.rs index 4461f3d7b..795bca546 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,3 +1,5 @@ +use std::thread; +use std::time; use util::*; use rocksdb::{Options, DB}; use blockchain::{BlockChain, BlockProvider, CacheSize}; @@ -121,6 +123,7 @@ impl ClientReport { } /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. +/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. pub struct Client { chain: Arc>, engine: Arc>, @@ -140,7 +143,8 @@ impl Client { let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); - /*opts.set_use_fsync(false); + opts.set_use_fsync(false); + /* opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); opts.set_block_cache_size_mb(1024); @@ -177,16 +181,22 @@ impl Client { })) } + /// Flush the block import queue. 
+ pub fn flush_queue(&self) { + flushln!("Flushing queue {:?}", self.block_queue.read().unwrap().queue_info()); + while self.block_queue.read().unwrap().queue_info().unverified_queue_size > 0 { + thread::sleep(time::Duration::from_millis(20)); + flushln!("Flushing queue [waited] {:?}", self.block_queue.read().unwrap().queue_info()); + } + } + /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, _io: &IoChannel) { let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); - let blocks = self.block_queue.write().unwrap().drain(128); - if blocks.is_empty() { - return; - } - for block in blocks { + for block in self.block_queue.write().unwrap().drain(128) { + flushln!("Importing block..."); if bad.contains(&block.header.parent_hash) { self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); diff --git a/src/pod_state.rs b/src/pod_state.rs index 1ea8382a5..15ae6a8ae 100644 --- a/src/pod_state.rs +++ b/src/pod_state.rs @@ -1,17 +1,25 @@ use util::*; use pod_account::*; -#[derive(Debug,Clone,PartialEq,Eq)] +#[derive(Debug,Clone,PartialEq,Eq,Default)] /// TODO [Gav Wood] Please document me pub struct PodState (BTreeMap); impl PodState { /// Contruct a new object from the `m`. - pub fn new(m: BTreeMap) -> PodState { PodState(m) } + pub fn new() -> PodState { Default::default() } + + /// Contruct a new object from the `m`. + pub fn from(m: BTreeMap) -> PodState { PodState(m) } /// Get the underlying map. pub fn get(&self) -> &BTreeMap { &self.0 } + /// Get the root hash of the trie of the RLP of this. + pub fn root(&self) -> H256 { + sec_trie_root(self.0.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect()) + } + /// Drain object to get the underlying map. pub fn drain(self) -> BTreeMap { self.0 } } diff --git a/src/spec.rs b/src/spec.rs index 9f98d5e2a..9e97595b9 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -1,6 +1,7 @@ use common::*; use flate2::read::GzDecoder; use engine::*; +use pod_state::*; use null_engine::*; /// Converts file from base64 gzipped bytes to json @@ -40,28 +41,6 @@ fn json_to_rlp_map(json: &Json) -> HashMap { }) } -//TODO: add code and data -#[derive(Debug)] -/// Genesis account data. Does no thave a DB overlay cache -pub struct GenesisAccount { - // Balance of the account. - balance: U256, - // Nonce of the account. - nonce: U256, -} - -impl GenesisAccount { - /// TODO [arkpar] Please document me - pub fn rlp(&self) -> Bytes { - let mut stream = RlpStream::new_list(4); - stream.append(&self.nonce); - stream.append(&self.balance); - stream.append(&SHA3_NULL_RLP); - stream.append(&SHA3_EMPTY); - stream.out() - } -} - /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. #[derive(Debug)] @@ -83,7 +62,7 @@ pub struct Spec { // Builtin-contracts are here for now but would like to abstract into Engine API eventually. /// TODO [Gav Wood] Please document me - pub builtins: HashMap, + pub builtins: BTreeMap, // Genesis params. 
/// TODO [Gav Wood] Please document me @@ -101,7 +80,7 @@ pub struct Spec { /// TODO [arkpar] Please document me pub extra_data: Bytes, /// TODO [Gav Wood] Please document me - pub genesis_state: HashMap, + pub genesis_state: PodState, /// TODO [Gav Wood] Please document me pub seal_fields: usize, /// TODO [Gav Wood] Please document me @@ -126,7 +105,7 @@ impl Spec { /// Return the state root for the genesis state, memoising accordingly. pub fn state_root(&self) -> H256 { if self.state_root_memo.read().unwrap().is_none() { - *self.state_root_memo.write().unwrap() = Some(sec_trie_root(self.genesis_state.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect())); + *self.state_root_memo.write().unwrap() = Some(self.genesis_state.root()); } self.state_root_memo.read().unwrap().as_ref().unwrap().clone() } @@ -174,6 +153,35 @@ impl Spec { ret.append_raw(&empty_list, 1); ret.out() } + + /// Overwrite the genesis components with the given JSON, assuming standard Ethereum test format. + pub fn overwrite_genesis(&mut self, genesis: &Json) { + let (seal_fields, seal_rlp) = { + if genesis.find("mixHash").is_some() && genesis.find("nonce").is_some() { + let mut s = RlpStream::new(); + s.append(&H256::from_json(&genesis["mixHash"])); + s.append(&H64::from_json(&genesis["nonce"])); + (2, s.out()) + } else { + // backup algo that will work with sealFields/sealRlp (and without). + ( + u64::from_json(&genesis["sealFields"]) as usize, + Bytes::from_json(&genesis["sealRlp"]) + ) + } + }; + + self.parent_hash = H256::from_json(&genesis["parentHash"]); + self.author = Address::from_json(&genesis["coinbase"]); + self.difficulty = U256::from_json(&genesis["difficulty"]); + self.gas_limit = U256::from_json(&genesis["gasLimit"]); + self.gas_used = U256::from_json(&genesis["gasUsed"]); + self.timestamp = u64::from_json(&genesis["timestamp"]); + self.extra_data = Bytes::from_json(&genesis["extraData"]); + self.seal_fields = seal_fields; + self.seal_rlp = seal_rlp; + self.state_root_memo = RwLock::new(genesis.find("stateRoot").and_then(|_| Some(H256::from_json(&genesis["stateRoot"])))); + } } impl FromJson for Spec { @@ -181,8 +189,8 @@ impl FromJson for Spec { fn from_json(json: &Json) -> Spec { // once we commit ourselves to some json parsing library (serde?) // move it to proper data structure - let mut state = HashMap::new(); - let mut builtins = HashMap::new(); + let mut builtins = BTreeMap::new(); + let mut state = PodState::new(); if let Some(&Json::Object(ref accounts)) = json.find("accounts") { for (address, acc) in accounts.iter() { @@ -192,15 +200,8 @@ impl FromJson for Spec { builtins.insert(addr.clone(), builtin); } } - let balance = acc.find("balance").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); - let nonce = acc.find("nonce").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None }); -// let balance = if let Some(&Json::String(ref b)) = acc.find("balance") {U256::from_dec_str(b).unwrap_or(U256::from(0))} else {U256::from(0)}; -// let nonce = if let Some(&Json::String(ref n)) = acc.find("nonce") {U256::from_dec_str(n).unwrap_or(U256::from(0))} else {U256::from(0)}; - // TODO: handle code & data if they exist. 
- if balance.is_some() || nonce.is_some() { - state.insert(addr, GenesisAccount { balance: balance.unwrap_or_else(U256::zero), nonce: nonce.unwrap_or_else(U256::zero) }); - } } + state = xjson!(&json["accounts"]); } let nodes = if let Some(&Json::Array(ref ns)) = json.find("nodes") { @@ -253,7 +254,7 @@ impl Spec { let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); - for (address, account) in &self.genesis_state { + for (address, account) in self.genesis_state.get().iter() { t.insert(address.as_slice(), &account.rlp()); } } diff --git a/src/state.rs b/src/state.rs index 4b7e5af34..6e5d586f3 100644 --- a/src/state.rs +++ b/src/state.rs @@ -221,7 +221,7 @@ impl State { /// Populate a PodAccount map from this state. pub fn to_pod(&self) -> PodState { // TODO: handle database rather than just the cache. - PodState::new(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { + PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { if let Some(ref acc) = *opt { m.insert(add.clone(), PodAccount::from_account(acc)); } diff --git a/src/state_diff.rs b/src/state_diff.rs index 08fccf3ed..12e2d76ca 100644 --- a/src/state_diff.rs +++ b/src/state_diff.rs @@ -32,8 +32,8 @@ mod test { #[test] fn create_delete() { - let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); - assert_eq!(StateDiff::diff_pod(&a, &PodState::new(map![])), StateDiff(map![ + let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); + assert_eq!(StateDiff::diff_pod(&a, &PodState::new()), StateDiff(map![ x!(1) => AccountDiff{ balance: Diff::Died(x!(69)), nonce: Diff::Died(x!(0)), @@ -41,7 +41,7 @@ mod test { storage: map![], } ])); - assert_eq!(StateDiff::diff_pod(&PodState::new(map![]), &a), StateDiff(map![ + assert_eq!(StateDiff::diff_pod(&PodState::new(), &a), StateDiff(map![ x!(1) => AccountDiff{ balance: Diff::Born(x!(69)), nonce: Diff::Born(x!(0)), @@ -53,8 +53,8 @@ mod test { #[test] fn create_delete_with_unchanged() { - let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); - let b = PodState::new(map![ + let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); + let b = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]), x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); @@ -78,11 +78,11 @@ mod test { #[test] fn change_with_unchanged() { - let a = PodState::new(map![ + let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]), x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); - let b = PodState::new(map![ + let b = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(1), vec![], map![]), x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]); diff --git a/src/tests/chain.rs b/src/tests/chain.rs new file mode 100644 index 000000000..db3e398b0 --- /dev/null +++ b/src/tests/chain.rs @@ -0,0 +1,49 @@ +use std::env; +use super::test_common::*; +use client::{BlockChainClient,Client}; +use pod_state::*; +use ethereum; + +fn do_json_test(json_data: &[u8]) -> Vec { + let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid"); + let mut failed = Vec::new(); + + for (name, test) in json.as_object().unwrap() { + let mut fail = false; + { + let mut fail_unless = |cond: bool| if !cond && !fail { + failed.push(name.clone()); + flush(format!("FAIL\n")); + fail = true; + true + } else {false}; + + flush(format!(" - {}...", name)); + 
+ let blocks: Vec = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect(); + let mut spec = ethereum::new_frontier_like_test(); + spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); + spec.genesis_state = PodState::from_json(test.find("pre").unwrap()); + + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + { + let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); + blocks.into_iter().foreach(|b| { + client.import_block(b).unwrap(); + }); + client.flush_queue(); + client.import_verified_blocks(&IoChannel::disconnected()); + flushln!("Best hash: {}", client.chain_info().best_block_hash); + } + fs::remove_dir_all(&dir).unwrap(); + } + if !fail { + flush(format!("ok\n")); + } + } + println!("!!! {:?} tests from failed.", failed.len()); + failed +} + +declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} diff --git a/src/tests/mod.rs b/src/tests/mod.rs index c30f7f9b8..799c47230 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -4,3 +4,4 @@ mod test_common; mod transaction; mod executive; mod state; +mod chain; diff --git a/util/src/standard.rs b/util/src/standard.rs index 19a084a2c..873df6cb4 100644 --- a/util/src/standard.rs +++ b/util/src/standard.rs @@ -1,13 +1,14 @@ pub use std::io; +pub use std::fs; pub use std::str; pub use std::fmt; -pub use std::slice; pub use std::cmp; pub use std::ptr; -pub use std::result; -pub use std::option; pub use std::mem; pub use std::ops; +pub use std::slice; +pub use std::result; +pub use std::option; pub use std::path::Path; pub use std::str::{FromStr}; @@ -15,9 +16,9 @@ pub use std::io::{Read,Write}; pub use std::hash::{Hash, Hasher}; pub use std::error::Error as StdError; -pub use std::sync::*; pub use std::ops::*; pub use std::cmp::*; +pub use std::sync::*; pub use std::cell::*; pub use std::collections::*; From a43ca9ae34d7dcd299cd82bc1ee36ac15464f9bb Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 25 Jan 2016 19:20:34 +0100 Subject: [PATCH 058/138] blockqueue flush --- src/block_queue.rs | 22 +++++++++++++++++++--- src/client.rs | 7 +------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index 36539bfff..a0e46193b 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -35,6 +35,7 @@ pub struct BlockQueue { verifiers: Vec>, deleting: Arc, ready_signal: Arc, + empty: Arc, processing: HashSet } @@ -79,6 +80,7 @@ impl BlockQueue { let more_to_verify = Arc::new(Condvar::new()); let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel }); let deleting = Arc::new(AtomicBool::new(false)); + let empty = Arc::new(Condvar::new()); let mut verifiers: Vec> = Vec::new(); let thread_count = max(::num_cpus::get(), 3) - 2; @@ -87,8 +89,9 @@ impl BlockQueue { let engine = engine.clone(); let more_to_verify = more_to_verify.clone(); let ready_signal = ready_signal.clone(); + let empty = empty.clone(); let deleting = deleting.clone(); - verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting)) + verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)) .expect("Error starting block verification thread")); } BlockQueue { @@ -99,14 +102,20 @@ impl BlockQueue { verifiers: verifiers, deleting: deleting.clone(), processing: HashSet::new(), + 
empty: empty.clone(), } } - fn verify(verification: Arc>, engine: Arc>, wait: Arc, ready: Arc, deleting: Arc) { + fn verify(verification: Arc>, engine: Arc>, wait: Arc, ready: Arc, deleting: Arc, empty: Arc) { while !deleting.load(AtomicOrdering::Relaxed) { - { let mut lock = verification.lock().unwrap(); + + if lock.unverified.is_empty() && lock.verifying.is_empty() { + empty.notify_all(); + } + + while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { lock = wait.wait(lock).unwrap(); } @@ -176,6 +185,13 @@ impl BlockQueue { verification.verifying.clear(); } + /// Wait for queue to be empty + pub fn flush(&mut self) { + let mutex: Mutex<()> = Mutex::new(()); + let lock = mutex.lock().unwrap(); + let _ = self.empty.wait(lock).unwrap(); + } + /// Add a block to the queue. pub fn import_block(&mut self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); diff --git a/src/client.rs b/src/client.rs index 795bca546..f05819370 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,5 +1,3 @@ -use std::thread; -use std::time; use util::*; use rocksdb::{Options, DB}; use blockchain::{BlockChain, BlockProvider, CacheSize}; @@ -184,10 +182,7 @@ impl Client { /// Flush the block import queue. pub fn flush_queue(&self) { flushln!("Flushing queue {:?}", self.block_queue.read().unwrap().queue_info()); - while self.block_queue.read().unwrap().queue_info().unverified_queue_size > 0 { - thread::sleep(time::Duration::from_millis(20)); - flushln!("Flushing queue [waited] {:?}", self.block_queue.read().unwrap().queue_info()); - } + self.block_queue.write().unwrap().flush(); } /// This is triggered by a message coming from a block queue when the block is ready for insertion From 41508cbd507c4514be48cae43c1eb8d8cef4ab21 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 23:24:51 +0100 Subject: [PATCH 059/138] Fix queue flush and add working tests. --- src/block.rs | 1 + src/block_queue.rs | 19 ++++++++++++------- src/client.rs | 7 +++---- src/spec.rs | 13 ++++++++++++- src/state.rs | 6 +++--- src/sync/tests.rs | 1 + src/tests/chain.rs | 11 ++++++++--- 7 files changed, 40 insertions(+), 18 deletions(-) diff --git a/src/block.rs b/src/block.rs index 1ff326430..c63f49bd7 100644 --- a/src/block.rs +++ b/src/block.rs @@ -188,6 +188,7 @@ impl<'x, 'y> OpenBlock<'x, 'y> { // info!("env_info says gas_used={}", env_info.gas_used); match self.block.state.apply(&env_info, self.engine, &t) { Ok(receipt) => { + flushln!("Transaction executed {:?}", receipt); self.block.archive_set.insert(h.unwrap_or_else(||t.hash())); self.block.archive.push(Entry { transaction: t, receipt: receipt }); Ok(&self.block.archive.last().unwrap().receipt) diff --git a/src/block_queue.rs b/src/block_queue.rs index a0e46193b..fa091d0c4 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -19,11 +19,16 @@ pub struct BlockQueueInfo { pub unverified_queue_size: usize, /// Number of verified queued blocks pending import pub verified_queue_size: usize, + /// Number of blocks being verified + pub verifying_queue_size: usize, } impl BlockQueueInfo { /// The total size of the queues. - pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size } + pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size } + + /// The size of the unverified and verifying queues. + pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size } } /// A queue of blocks. 
Sits between network or other I/O and the BlockChain. @@ -91,7 +96,7 @@ impl BlockQueue { let ready_signal = ready_signal.clone(); let empty = empty.clone(); let deleting = deleting.clone(); - verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)) + verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)) .expect("Error starting block verification thread")); } BlockQueue { @@ -115,7 +120,6 @@ impl BlockQueue { empty.notify_all(); } - while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { lock = wait.wait(lock).unwrap(); } @@ -154,7 +158,6 @@ impl BlockQueue { }, Err(err) => { let mut v = verification.lock().unwrap(); - flushln!("Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); v.bad.insert(block_hash.clone()); v.verifying.retain(|e| e.hash != block_hash); @@ -187,9 +190,10 @@ impl BlockQueue { /// Wait for queue to be empty pub fn flush(&mut self) { - let mutex: Mutex<()> = Mutex::new(()); - let lock = mutex.lock().unwrap(); - let _ = self.empty.wait(lock).unwrap(); + let mut verification = self.verification.lock().unwrap(); + while !verification.unverified.is_empty() && !verification.verifying.is_empty() { + verification = self.empty.wait(verification).unwrap(); + } } /// Add a block to the queue. @@ -265,6 +269,7 @@ impl BlockQueue { full: false, verified_queue_size: verification.verified.len(), unverified_queue_size: verification.unverified.len(), + verifying_queue_size: verification.verifying.len(), } } } diff --git a/src/client.rs b/src/client.rs index f05819370..2cc6c150d 100644 --- a/src/client.rs +++ b/src/client.rs @@ -181,7 +181,6 @@ impl Client { /// Flush the block import queue. 
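
The queue-flush logic above reduces to a standard pattern: the queue state lives behind a Mutex, verifier threads signal a Condvar once the unverified and verifying sets drain, and `flush` re-checks the emptiness predicate every time the wait wakes up. A minimal, self-contained sketch of that pattern, using only the standard library and illustrative names (not the actual BlockQueue API):

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Illustrative work queue: `flush` blocks until a worker has drained every item.
struct WorkQueue {
    items: Mutex<VecDeque<u32>>,
    empty: Condvar,
}

impl WorkQueue {
    fn push(&self, item: u32) {
        self.items.lock().unwrap().push_back(item);
    }

    // Wait on the same mutex that guards the queue, re-checking the predicate
    // after every wakeup, exactly as the corrected `flush` above does.
    fn flush(&self) {
        let mut items = self.items.lock().unwrap();
        while !items.is_empty() {
            items = self.empty.wait(items).unwrap();
        }
    }

    fn work(&self) {
        loop {
            let mut items = self.items.lock().unwrap();
            match items.pop_front() {
                Some(_item) => { /* verification work would happen here */ }
                None => {
                    // Drained: wake anyone blocked in `flush`.
                    self.empty.notify_all();
                    return;
                }
            }
        }
    }
}

fn main() {
    let queue = Arc::new(WorkQueue { items: Mutex::new(VecDeque::new()), empty: Condvar::new() });
    for i in 0..100 { queue.push(i); }
    let worker = { let q = queue.clone(); thread::spawn(move || q.work()) };
    queue.flush();
    worker.join().unwrap();
}
```

Waking on a Condvar tied to the guarded state avoids both the sleep-and-poll loop that `flush_queue` used previously and the dangling wait on a throwaway mutex from the first version of `flush`.
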
pub fn flush_queue(&self) { - flushln!("Flushing queue {:?}", self.block_queue.read().unwrap().queue_info()); self.block_queue.write().unwrap().flush(); } @@ -189,9 +188,8 @@ impl Client { pub fn import_verified_blocks(&self, _io: &IoChannel) { let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); - - for block in self.block_queue.write().unwrap().drain(128) { - flushln!("Importing block..."); + let blocks = self.block_queue.write().unwrap().drain(128); + for block in blocks { if bad.contains(&block.header.parent_hash) { self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); @@ -238,6 +236,7 @@ impl Client { } }; if let Err(e) = verify_block_final(&header, result.block().header()) { + flushln!("Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; diff --git a/src/spec.rs b/src/spec.rs index 9e97595b9..326552524 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -80,7 +80,7 @@ pub struct Spec { /// TODO [arkpar] Please document me pub extra_data: Bytes, /// TODO [Gav Wood] Please document me - pub genesis_state: PodState, + genesis_state: PodState, /// TODO [Gav Wood] Please document me pub seal_fields: usize, /// TODO [Gav Wood] Please document me @@ -182,6 +182,17 @@ impl Spec { self.seal_rlp = seal_rlp; self.state_root_memo = RwLock::new(genesis.find("stateRoot").and_then(|_| Some(H256::from_json(&genesis["stateRoot"])))); } + + /// Alter the value of the genesis state. + pub fn set_genesis_state(&mut self, s: PodState) { + self.genesis_state = s; + *self.state_root_memo.write().unwrap() = None; + } + + /// Returns `false` if the memoized state root is invalid. `true` otherwise. + pub fn is_state_root_valid(&self) -> bool { + self.state_root_memo.read().unwrap().clone().map_or(true, |sr| sr == self.genesis_state.root()) + } } impl FromJson for Spec { diff --git a/src/state.rs b/src/state.rs index 6e5d586f3..71ae0ecda 100644 --- a/src/state.rs +++ b/src/state.rs @@ -146,15 +146,15 @@ impl State { /// This will change the state accordingly. pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &Transaction) -> ApplyResult { - let old = self.to_pod(); +// let old = self.to_pod(); let e = try!(Executive::new(self, env_info, engine).transact(t)); //println!("Executed: {:?}", e); - trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod())); +// trace!("Applied transaction. 
Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod())); self.commit(); let receipt = Receipt::new(self.root().clone(), e.cumulative_gas_used, e.logs); - trace!("Transaction receipt: {:?}", receipt); +// trace!("Transaction receipt: {:?}", receipt); Ok(receipt) } diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 7f8a1748b..50d6efab2 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -158,6 +158,7 @@ impl BlockChainClient for TestBlockChainClient { full: false, verified_queue_size: 0, unverified_queue_size: 0, + verifying_queue_size: 0, } } diff --git a/src/tests/chain.rs b/src/tests/chain.rs index db3e398b0..922def6c7 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -22,8 +22,9 @@ fn do_json_test(json_data: &[u8]) -> Vec { let blocks: Vec = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect(); let mut spec = ethereum::new_frontier_like_test(); + spec.set_genesis_state(PodState::from_json(test.find("pre").unwrap())); spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); - spec.genesis_state = PodState::from_json(test.find("pre").unwrap()); + assert!(spec.is_state_root_valid()); let mut dir = env::temp_dir(); dir.push(H32::random().hex()); @@ -32,9 +33,12 @@ fn do_json_test(json_data: &[u8]) -> Vec { blocks.into_iter().foreach(|b| { client.import_block(b).unwrap(); }); + flushln!("Imported all"); client.flush_queue(); + flushln!("Flushed"); client.import_verified_blocks(&IoChannel::disconnected()); - flushln!("Best hash: {}", client.chain_info().best_block_hash); + flushln!("Checking..."); + fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } fs::remove_dir_all(&dir).unwrap(); } @@ -46,4 +50,5 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } -declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} +//declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} +declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} From 60af30558c91363239363aef632a41903a5a9cec Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 23:26:42 +0100 Subject: [PATCH 060/138] Cleanups. --- src/tests/chain.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 922def6c7..aeb92d3fd 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -33,11 +33,8 @@ fn do_json_test(json_data: &[u8]) -> Vec { blocks.into_iter().foreach(|b| { client.import_block(b).unwrap(); }); - flushln!("Imported all"); client.flush_queue(); - flushln!("Flushed"); client.import_verified_blocks(&IoChannel::disconnected()); - flushln!("Checking..."); fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } fs::remove_dir_all(&dir).unwrap(); @@ -50,5 +47,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } +// Fails. TODO: figure out why. //declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} + declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} From e479e8ca973ccd9e918518dbff5c7f6cf7e78785 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 23:37:49 +0100 Subject: [PATCH 061/138] Tody ups. 
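
The `Spec` changes above hinge on one invariant: the genesis state root is memoized because it is expensive to compute, so replacing the genesis state must clear the memo or `is_state_root_valid` would compare a stale root against the new state. A toy, self-contained sketch of that invalidation rule, with stand-in types (a `BTreeMap` and an XOR "root" instead of the real trie machinery):

```rust
use std::collections::BTreeMap;

struct ToySpec {
    genesis_state: BTreeMap<u64, u64>,
    state_root_memo: Option<u64>,
}

impl ToySpec {
    // Stand-in for the real (expensive) trie-root computation.
    fn compute_root(&self) -> u64 {
        self.genesis_state.iter().map(|(k, v)| k ^ v).fold(0, |acc, x| acc ^ x)
    }

    fn set_genesis_state(&mut self, s: BTreeMap<u64, u64>) {
        self.genesis_state = s;
        self.state_root_memo = None; // invalidate, as `Spec::set_genesis_state` does
    }

    // `None` means "not computed yet", which is treated as trivially valid.
    fn is_state_root_valid(&self) -> bool {
        self.state_root_memo.map_or(true, |memo| memo == self.compute_root())
    }
}

fn main() {
    let mut spec = ToySpec { genesis_state: BTreeMap::new(), state_root_memo: Some(42) };
    assert!(!spec.is_state_root_valid()); // a stale memo is detected

    let mut state = BTreeMap::new();
    state.insert(1, 2);
    spec.set_genesis_state(state);
    assert!(spec.is_state_root_valid()); // memo was cleared along with the old state
}
```
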
--- src/state.rs | 4 ++-- src/tests/chain.rs | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/state.rs b/src/state.rs index 71ae0ecda..7310f63a7 100644 --- a/src/state.rs +++ b/src/state.rs @@ -3,7 +3,7 @@ use engine::Engine; use executive::Executive; use pod_account::*; use pod_state::*; -use state_diff::*; +//use state_diff::*; // TODO: uncomment once to_pod() works correctly. /// TODO [Gav Wood] Please document me pub type ApplyResult = Result; @@ -145,12 +145,12 @@ impl State { /// Execute a given transaction. /// This will change the state accordingly. pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &Transaction) -> ApplyResult { - // let old = self.to_pod(); let e = try!(Executive::new(self, env_info, engine).transact(t)); //println!("Executed: {:?}", e); + // TODO uncomment once to_pod() works correctly. // trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod())); self.commit(); let receipt = Receipt::new(self.root().clone(), e.cumulative_gas_used, e.logs); diff --git a/src/tests/chain.rs b/src/tests/chain.rs index aeb92d3fd..89199feed 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -47,7 +47,18 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } -// Fails. TODO: figure out why. -//declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} - +declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // UNKNOWN declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} +declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) +declare_test!{ignore => BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"}// UNKNOWN +declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // UNKNOWN From b6622e6efed4a68a70a28879261bdb7d475c640d Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 25 Jan 2016 23:39:21 +0100 Subject: [PATCH 062/138] Remove flushln!s. 
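
Several of the patches above swap ad-hoc `flushln!` prints for target-scoped `log` macros, which is what lets the chain tests run quietly while still allowing per-subsystem diagnostics. A small sketch of that wiring, assuming the 0.3-era `log`/`env_logger` APIs already in this series' dependencies (the function and messages are illustrative):

```rust
#[macro_use]
extern crate log;
extern crate env_logger;

fn verify_block(number: u64) {
    // With a target, verbosity can be tuned per subsystem,
    // e.g. RUST_LOG=client=warn, instead of printing unconditionally.
    trace!(target: "client", "Verifying block #{}", number);
    if number == 0 {
        warn!(target: "client", "Stage 2 block verification failed for #{}", number);
    }
}

fn main() {
    env_logger::init().unwrap(); // env_logger 0.3 returns a Result here
    verify_block(0);
}
```
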
--- src/block.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/block.rs b/src/block.rs index c63f49bd7..1ff326430 100644 --- a/src/block.rs +++ b/src/block.rs @@ -188,7 +188,6 @@ impl<'x, 'y> OpenBlock<'x, 'y> { // info!("env_info says gas_used={}", env_info.gas_used); match self.block.state.apply(&env_info, self.engine, &t) { Ok(receipt) => { - flushln!("Transaction executed {:?}", receipt); self.block.archive_set.insert(h.unwrap_or_else(||t.hash())); self.block.archive.push(Entry { transaction: t, receipt: receipt }); Ok(&self.block.archive.last().unwrap().receipt) From 16f2fa33ee9142ae8633b1a54998cf4a290bacc8 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Mon, 25 Jan 2016 23:59:50 +0100 Subject: [PATCH 063/138] Merging CALL & DELEGATECALL branches --- src/evm/ext.rs | 2 +- src/evm/interpreter.rs | 69 ++++++++++++++---------------------------- src/evm/tests.rs | 2 +- src/externalities.rs | 4 +-- src/tests/executive.rs | 4 +-- 5 files changed, 29 insertions(+), 52 deletions(-) diff --git a/src/evm/ext.rs b/src/evm/ext.rs index 748bc89da..83f093bcf 100644 --- a/src/evm/ext.rs +++ b/src/evm/ext.rs @@ -57,7 +57,7 @@ pub trait Ext { gas: &U256, sender_address: &Address, receive_address: &Address, - value: Option<&U256>, + value: Option, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult; diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 276f2873b..7657b1bbe 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -566,66 +566,43 @@ impl Interpreter { } }; }, - instructions::DELEGATECALL => { - let call_gas = stack.pop_back(); - let code_address = stack.pop_back(); - let code_address = u256_to_address(&code_address); - - let in_off = stack.pop_back(); - let in_size = stack.pop_back(); - let out_off = stack.pop_back(); - let out_size = stack.pop_back(); - - let can_call = ext.depth() < ext.schedule().max_depth; - if !can_call { - stack.push(U256::zero()); - return Ok(InstructionResult::UnusedGas(call_gas)); - } - - let call_result = { - // we need to write and read from memory in the same time - // and we don't want to copy - let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; - let output = mem.writeable_slice(out_off, out_size); - ext.call(&call_gas, ¶ms.sender, ¶ms.address, None, input, &code_address, output) - }; - - return match call_result { - MessageCallResult::Success(gas_left) => { - stack.push(U256::one()); - Ok(InstructionResult::UnusedGas(gas_left)) - }, - MessageCallResult::Failed => { - stack.push(U256::zero()); - Ok(InstructionResult::Ok) - } - }; - }, - instructions::CALL | instructions::CALLCODE => { + instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL => { assert!(ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, "overflow possible"); let call_gas = stack.pop_back(); let code_address = stack.pop_back(); let code_address = u256_to_address(&code_address); - let value = stack.pop_back(); + let value = match instruction == instructions::DELEGATECALL { + true => None, + false => Some(stack.pop_back()) + }; let in_off = stack.pop_back(); let in_size = stack.pop_back(); let out_off = stack.pop_back(); let out_size = stack.pop_back(); - let call_gas = call_gas + match value > U256::zero() { + // Add stipend (only CALL|CALLCODE when value > 0) + let call_gas = call_gas + value.map_or_else(U256::zero, |val| match val > U256::zero() { true => U256::from(ext.schedule().call_stipend), false => U256::zero() + }); + + // Get sender & receive addresses, check if we 
have balance + let (sender_address, receive_address, has_balance) = match instruction { + instructions::CALL => { + let has_balance = ext.balance(¶ms.address) >= value.unwrap(); + (¶ms.address, &code_address, has_balance) + }, + instructions::CALLCODE => { + let has_balance = ext.balance(¶ms.address) >= value.unwrap(); + (¶ms.address, ¶ms.address, has_balance) + }, + instructions::DELEGATECALL => (¶ms.sender, ¶ms.address, true), + _ => panic!(format!("Unexpected instruction {} in CALL branch.", instruction)) }; - let (sender_address, receive_address) = match instruction == instructions::CALL { - true => (¶ms.address, &code_address), - false => (¶ms.address, ¶ms.address) - }; - - let can_call = ext.balance(¶ms.address) >= value && ext.depth() < ext.schedule().max_depth; - + let can_call = has_balance && ext.depth() < ext.schedule().max_depth; if !can_call { stack.push(U256::zero()); return Ok(InstructionResult::UnusedGas(call_gas)); @@ -636,7 +613,7 @@ impl Interpreter { // and we don't want to copy let input = unsafe { ::std::mem::transmute(mem.read_slice(in_off, in_size)) }; let output = mem.writeable_slice(out_off, out_size); - ext.call(&call_gas, sender_address, receive_address, Some(&value), input, &code_address, output) + ext.call(&call_gas, sender_address, receive_address, value, input, &code_address, output) }; return match call_result { diff --git a/src/evm/tests.rs b/src/evm/tests.rs index cf4262914..ad81cf877 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -63,7 +63,7 @@ impl Ext for FakeExt { _gas: &U256, _sender_address: &Address, _receive_address: &Address, - _value: Option<&U256>, + _value: Option, _data: &[u8], _code_address: &Address, _output: &mut [u8]) -> MessageCallResult { diff --git a/src/externalities.rs b/src/externalities.rs index d8b5d6110..f9a79c3c0 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -138,7 +138,7 @@ impl<'a> Ext for Externalities<'a> { gas: &U256, sender_address: &Address, receive_address: &Address, - value: Option<&U256>, + value: Option, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult { @@ -156,7 +156,7 @@ impl<'a> Ext for Externalities<'a> { }; if let Some(value) = value { - params.value = ActionValue::Transfer(value.clone()); + params.value = ActionValue::Transfer(value); } let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth); diff --git a/src/tests/executive.rs b/src/tests/executive.rs index 0604c9992..a94fd1605 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -103,7 +103,7 @@ impl<'a> Ext for TestExt<'a> { gas: &U256, _sender_address: &Address, receive_address: &Address, - value: Option<&U256>, + value: Option, data: &[u8], _code_address: &Address, _output: &mut [u8]) -> MessageCallResult { @@ -111,7 +111,7 @@ impl<'a> Ext for TestExt<'a> { data: data.to_vec(), destination: Some(receive_address.clone()), gas_limit: *gas, - value: *value.unwrap() + value: value.unwrap() }); MessageCallResult::Success(*gas) } From 746e2a57c9793da187d6df7dbb2b0a6948f0df27 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 26 Jan 2016 00:03:01 +0100 Subject: [PATCH 064/138] Cleaning readme --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 48172bb60..216ac8091 100644 --- a/README.md +++ b/README.md @@ -1,5 +1 @@ # ethcore - - -# Running clippy - From 3fe0c3c7897b36fa9ed87ccca275d7c277093264 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 00:26:36 +0100 Subject: [PATCH 065/138] Unignore passingn tests. 
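
The merged `CALL`/`CALLCODE`/`DELEGATECALL` branch above encodes three small rules: `DELEGATECALL` transfers no value and keeps the caller's sender/address pair, `CALLCODE` runs the foreign code against the current account, and only a plain `CALL` targets the code's own address. A stripped-down sketch of that dispatch with illustrative types (plain integers instead of `Address`/`U256`):

```rust
enum CallKind { Call, CallCode, DelegateCall }

struct Params { sender: u64, address: u64 }

// Returns (sender, receiver, value handed to the callee), mirroring the
// match in the interpreter diff; DELEGATECALL never pops or forwards a value.
fn call_targets(kind: CallKind, params: &Params, code_address: u64, value: Option<u64>)
    -> (u64, u64, Option<u64>)
{
    match kind {
        CallKind::Call => (params.address, code_address, value),
        CallKind::CallCode => (params.address, params.address, value),
        CallKind::DelegateCall => (params.sender, params.address, None),
    }
}

fn main() {
    let params = Params { sender: 1, address: 2 };
    assert_eq!(call_targets(CallKind::Call, &params, 3, Some(10)), (2, 3, Some(10)));
    assert_eq!(call_targets(CallKind::CallCode, &params, 3, Some(10)), (2, 2, Some(10)));
    assert_eq!(call_targets(CallKind::DelegateCall, &params, 3, None), (1, 2, None));
}
```
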
--- src/tests/chain.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 89199feed..ba42ae935 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -47,18 +47,18 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } -declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // FAILS declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} -declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) -declare_test!{ignore => BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"}// UNKNOWN -declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // UNKNOWN -declare_test!{ignore => BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // UNKNOWN +declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS +declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // FAILS +declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} +declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} +declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS +declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS +declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} +declare_test!{ignore => BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) +declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} +declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS +declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS +declare_test!{ignore => BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // FAILS +declare_test!{ignore => BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // FAILS From 5237c575660a1d9dd0566dbfe6a4d0b588a20fdd Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 00:42:07 +0100 Subject: [PATCH 066/138] block transaction count --- src/bin/client/rpc/impls/eth.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/src/bin/client/rpc/impls/eth.rs b/src/bin/client/rpc/impls/eth.rs index 821eacd07..f4d102131 100644 --- a/src/bin/client/rpc/impls/eth.rs +++ b/src/bin/client/rpc/impls/eth.rs @@ -59,6 +59,10 @@ impl Eth for EthClient { _ => Err(Error::invalid_params()) } } + + fn block_transaction_count(&self, _: Params) -> Result { + Ok(Value::U64(0)) + } } pub struct EthFilterClient { @@ -83,7 +87,6 @@ impl EthFilter for EthFilterClient { } fn filter_changes(&self, _: Params) -> Result { - println!("filter changes: {:?}", self.client.chain_info().best_block_hash.to_hex()); Ok(Value::Array(vec![Value::String(self.client.chain_info().best_block_hash.to_hex())])) } } From 2dcfb52b5692d37368e6d60e4d6eb2aad9f2fbbf Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 00:46:24 +0100 Subject: [PATCH 067/138] Closes #213 --- src/tests/chain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index ba42ae935..d3e978e05 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -47,6 +47,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } + declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // FAILS declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS From 7065c477a4ec5ac81b3cb24c4b45153f437e9287 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 00:48:01 +0100 Subject: [PATCH 068/138] Closes #70 --- src/tests/chain.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index d3e978e05..ba42ae935 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -47,7 +47,6 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } - declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // FAILS declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS From 280749b34cfdce8c110cf93f52aaa677f57c3d72 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 26 Jan 2016 09:13:44 +0100 Subject: [PATCH 069/138] Removing println --- src/executive.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/executive.rs b/src/executive.rs index f5952e530..af738b0f6 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -166,7 +166,6 @@ impl<'a> Executive<'a> { /// Modifies the substate and the output. /// Returns either gas_left or `evm::Error`. pub fn call(&mut self, params: ActionParams, substate: &mut Substate, mut output: BytesRef) -> evm::Result { - println!("Calling executive. 
Sender: {}", params.sender); // backup used in case of running out of gas let backup = self.state.clone(); From c66aa52166ab04f51ca90ded67574070e9357622 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 26 Jan 2016 10:15:55 +0100 Subject: [PATCH 070/138] Spawning new thread when we are reaching stack limit --- Cargo.toml | 1 + src/evm/jit.rs | 19 ++++++++++++------- src/evm/tests.rs | 1 + src/executive.rs | 36 +++++++++++++++++++++++++++++------- src/lib.rs | 1 + src/tests/executive.rs | 4 ++-- src/tests/state.rs | 10 +++++----- 7 files changed, 51 insertions(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 54b1b406e..ce49d0dd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ num_cpus = "0.2" docopt = "0.6" docopt_macros = "0.6" ctrlc = "1.0" +crossbeam = "0.1.5" clippy = "0.0.37" [features] diff --git a/src/evm/jit.rs b/src/evm/jit.rs index 9f990155d..e073a380d 100644 --- a/src/evm/jit.rs +++ b/src/evm/jit.rs @@ -64,7 +64,7 @@ impl IntoJit for H256 { for i in 0..self.bytes().len() { let rev = self.bytes().len() - 1 - i; let pos = rev / 8; - ret[pos] += (self.bytes()[i] as u64) << (rev % 8) * 8; + ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8); } evmjit::I256 { words: ret } } @@ -218,9 +218,11 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { } } - match self.ext.call(&call_gas, + match self.ext.call( + &call_gas, + &self.address, &receive_address, - &value, + Some(value), unsafe { slice::from_raw_parts(in_beg, in_size as usize) }, &code_address, unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) { @@ -262,7 +264,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { } let bytes_ref: &[u8] = slice::from_raw_parts(beg, size as usize); - self.ext.log(topics, bytes_ref.to_vec()); + self.ext.log(topics, bytes_ref); } } @@ -287,8 +289,8 @@ impl evm::Evm for JitEvm { assert!(params.gas <= U256::from(i64::max_value() as u64), "evmjit max gas is 2 ^ 63"); assert!(params.gas_price <= U256::from(i64::max_value() as u64), "evmjit max gas is 2 ^ 63"); - let call_data = params.data.unwrap_or(vec![]); - let code = params.code.unwrap_or(vec![]); + let call_data = params.data.unwrap_or_else(Vec::new); + let code = params.code.unwrap_or_else(Vec::new); let mut data = evmjit::RuntimeDataHandle::new(); data.gas = params.gas.low_u64() as i64; @@ -303,7 +305,10 @@ impl evm::Evm for JitEvm { data.address = params.address.into_jit(); data.caller = params.sender.into_jit(); data.origin = params.origin.into_jit(); - data.call_value = params.value.into_jit(); + data.call_value = match params.value { + ActionValue::Transfer(val) => val.into_jit(), + ActionValue::Apparent(val) => val.into_jit() + }; data.author = ext.env_info().author.clone().into_jit(); data.difficulty = ext.env_info().difficulty.into_jit(); diff --git a/src/evm/tests.rs b/src/evm/tests.rs index ad81cf877..d448ccb3a 100644 --- a/src/evm/tests.rs +++ b/src/evm/tests.rs @@ -215,6 +215,7 @@ fn test_origin(factory: super::Factory) { assert_eq!(ext.store.get(&H256::new()).unwrap(), &H256::from_str("000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681").unwrap()); } +// TODO [todr] Fails with Signal 11 on JIT evm_test!{test_sender: test_sender_jit, test_sender_int} fn test_sender(factory: super::Factory) { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); diff --git a/src/executive.rs b/src/executive.rs index f5952e530..d5446b0e9 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -5,6 +5,12 @@ use engine::*; use evm::{self, Ext}; use externalities::*; use substate::*; 
+use crossbeam; + +/// Max depth to avoid stack overflow (when it's reached we start a new thread with VM) +/// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132) +/// Maybe something like here: https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp +const MAX_VM_DEPTH_FOR_THREAD: usize = 128; /// Returns new address created from address and given nonce. pub fn contract_address(address: &Address, nonce: &U256) -> Address { @@ -161,12 +167,32 @@ impl<'a> Executive<'a> { Ok(try!(self.finalize(t, substate, res))) } + fn exec_vm(&mut self, params: ActionParams, unconfirmed_substate: &mut Substate, output_policy: OutputPolicy) -> evm::Result { + // Ordinary execution - keep VM in same thread + if (self.depth + 1) % MAX_VM_DEPTH_FOR_THREAD != 0 { + let mut ext = self.as_externalities(OriginInfo::from(¶ms), unconfirmed_substate, output_policy); + let vm_factory = self.engine.vm_factory(); + return vm_factory.create().exec(params, &mut ext); + } + + // Start in new thread to reset stack + // TODO [todr] No thread builder yet, so we need to reset once for a while + // https://github.com/aturon/crossbeam/issues/16 + crossbeam::scope(|scope| { + let mut ext = self.as_externalities(OriginInfo::from(¶ms), unconfirmed_substate, output_policy); + let vm_factory = self.engine.vm_factory(); + + scope.spawn(move || { + vm_factory.create().exec(params, &mut ext) + }) + }).join() + } + /// Calls contract function with given contract params. /// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides). /// Modifies the substate and the output. /// Returns either gas_left or `evm::Error`. pub fn call(&mut self, params: ActionParams, substate: &mut Substate, mut output: BytesRef) -> evm::Result { - println!("Calling executive. Sender: {}", params.sender); // backup used in case of running out of gas let backup = self.state.clone(); @@ -201,8 +227,7 @@ impl<'a> Executive<'a> { let mut unconfirmed_substate = Substate::new(); let res = { - let mut ext = self.as_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::Return(output)); - self.engine.vm_factory().create().exec(params, &mut ext) + self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return(output)) }; trace!("exec: sstore-clears={}\n", unconfirmed_substate.sstore_clears_count); @@ -235,8 +260,7 @@ impl<'a> Executive<'a> { } let res = { - let mut ext = self.as_externalities(OriginInfo::from(¶ms), &mut unconfirmed_substate, OutputPolicy::InitContract); - self.engine.vm_factory().create().exec(params, &mut ext) + self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract) }; self.enact_result(&res, substate, unconfirmed_substate, backup); res @@ -277,7 +301,6 @@ impl<'a> Executive<'a> { match result { Err(evm::Error::Internal) => Err(ExecutionError::Internal), - // TODO [ToDr] BadJumpDestination @debris - how to handle that? 
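
The `exec_vm` change introduced here works around native stack exhaustion: once the nesting depth crosses `MAX_VM_DEPTH_FOR_THREAD`, the next VM invocation is handed to a freshly spawned thread (via `crossbeam::scope` in the patch), which starts with a clean stack. A self-contained sketch of the same idea using only `std::thread` (the depth constant and stack size are illustrative, not the values a production build would need):

```rust
use std::thread;

const LEVELS_PER_THREAD: usize = 128;

// Toy recursive "VM": every LEVELS_PER_THREAD nested levels, continue on a new
// thread so the native stack is reset instead of growing without bound.
fn run(depth: usize, remaining: usize) -> usize {
    if remaining == 0 {
        return depth;
    }
    if (depth + 1) % LEVELS_PER_THREAD != 0 {
        // Ordinary case: keep recursing on the current stack.
        return run(depth + 1, remaining - 1);
    }
    // Threshold reached: hop to a fresh thread (and therefore a fresh stack).
    thread::Builder::new()
        .stack_size(4 * 1024 * 1024)
        .spawn(move || run(depth + 1, remaining - 1))
        .expect("failed to spawn VM thread")
        .join()
        .expect("VM thread panicked")
}

fn main() {
    // Ten thousand nested "calls" complete even though no single thread
    // ever holds more than LEVELS_PER_THREAD frames of `run` on its stack.
    assert_eq!(run(0, 10_000), 10_000);
}
```

The patch reaches for crossbeam's scoped threads rather than `std::thread::spawn` because the externalities borrow from the executive and therefore cannot be moved into a `'static` thread; the sketch sidesteps that by passing only owned values.
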
Err(_) => { Ok(Executed { gas: t.gas, @@ -302,7 +325,6 @@ impl<'a> Executive<'a> { } fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate, backup: State) { - // TODO: handle other evm::Errors same as OutOfGas once they are implemented match *result { Err(evm::Error::OutOfGas) | Err(evm::Error::BadJumpDestination {..}) diff --git a/src/lib.rs b/src/lib.rs index 262f8682f..8dd02b3bc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -90,6 +90,7 @@ extern crate num_cpus; extern crate evmjit; #[macro_use] extern crate ethcore_util as util; +extern crate crossbeam; // NOTE: Add doc parser exception for these pub declarations. diff --git a/src/tests/executive.rs b/src/tests/executive.rs index a94fd1605..1df1b7eec 100644 --- a/src/tests/executive.rs +++ b/src/tests/executive.rs @@ -271,8 +271,8 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec { declare_test!{ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"} declare_test!{ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperationTest"} -// this one crashes with some vm internal error. Separately they pass. -declare_test!{ignore => ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} +declare_test!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"} + // TODO [todr] Fails with Signal 11 when using JIT declare_test!{ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfoTest"} declare_test!{ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperationsTest"} declare_test!{heavy => ExecutiveTests_vmInputLimits, "VMTests/vmInputLimits"} diff --git a/src/tests/state.rs b/src/tests/state.rs index 75b9b2493..325a8b646 100644 --- a/src/tests/state.rs +++ b/src/tests/state.rs @@ -73,7 +73,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{StateTests_stBlockHashTest, "StateTests/stBlockHashTest"} declare_test!{StateTests_stCallCodes, "StateTests/stCallCodes"} -declare_test!{ignore => StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} //<< Out of stack +declare_test!{StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} declare_test!{StateTests_stExample, "StateTests/stExample"} declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"} @@ -81,12 +81,12 @@ declare_test!{StateTests_stLogTests, "StateTests/stLogTests"} declare_test!{heavy => StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"} declare_test!{heavy => StateTests_stMemoryTest, "StateTests/stMemoryTest"} declare_test!{StateTests_stPreCompiledContracts, "StateTests/stPreCompiledContracts"} -declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long -declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack +declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} +declare_test!{StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} declare_test!{StateTests_stRefundTest, "StateTests/stRefundTest"} declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"} -declare_test!{ignore => StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Out of Stack -declare_test!{ignore => StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Out of stack +declare_test!{StateTests_stSpecialTest, "StateTests/stSpecialTest"} 
+declare_test!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"} declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"} declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"} From e9bd52b34a579a4ebefc9b4caaf492db970a521a Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 10:26:18 +0100 Subject: [PATCH 071/138] fixed failing tests --- src/sync/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 78a5d7613..34a1e429d 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -9,13 +9,13 @@ /// extern crate ethcore; /// use std::env; /// use std::sync::Arc; -/// use util::network::NetworkService; +/// use util::network::{NetworkService, NetworkConfiguration}; /// use ethcore::client::Client; /// use ethcore::sync::EthSync; /// use ethcore::ethereum; /// /// fn main() { -/// let mut service = NetworkService::start().unwrap(); +/// let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); /// let dir = env::temp_dir(); /// let client = Client::new(ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); /// EthSync::register(&mut service, client); From adbde5f3dac1abbdd7ba42d41a23038bc140c60c Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 26 Jan 2016 10:48:33 +0100 Subject: [PATCH 072/138] Schedule documentation --- src/evm/schedule.rs | 62 ++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/src/evm/schedule.rs b/src/evm/schedule.rs index 70edfceea..d46b7ff11 100644 --- a/src/evm/schedule.rs +++ b/src/evm/schedule.rs @@ -2,67 +2,67 @@ /// Definition of the cost schedule and other parameterisations for the EVM. 
pub struct Schedule { - /// TODO [Gav Wood] Please document me + /// Does it support exceptional failed code deposit pub exceptional_failed_code_deposit: bool, - /// TODO [Gav Wood] Please document me + /// Does it have a delegate cal pub have_delegate_call: bool, - /// TODO [Tomusdrw] Please document me + /// VM stack limit pub stack_limit: usize, - /// TODO [Gav Wood] Please document me + /// Max number of nested calls/creates pub max_depth: usize, - /// TODO [Gav Wood] Please document me + /// Gas prices for instructions in all tiers pub tier_step_gas: [usize; 8], - /// TODO [Gav Wood] Please document me + /// Gas price for `EXP` opcode pub exp_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas for `EXP` opcode for each byte of exponent pub exp_byte_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for `SHA3` opcode pub sha3_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas for `SHA3` opcode for each word of hashed memory pub sha3_word_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for loading from storage pub sload_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for setting new value to storage (`storage==0`, `new!=0`) pub sstore_set_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for altering value in storage pub sstore_reset_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`) pub sstore_refund_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for `JUMPDEST` opcode pub jumpdest_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for `LOG*` pub log_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas for data in `LOG*` pub log_data_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas for each topic in `LOG*` pub log_topic_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for `CREATE` opcode pub create_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for `*CALL*` opcodes pub call_gas: usize, - /// TODO [Gav Wood] Please document me + /// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0` pub call_stipend: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas required for value transfer (`CALL|CALLCODE`) pub call_value_transfer_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional gas for creating new account (`CALL|CALLCODE`) pub call_new_account_gas: usize, - /// TODO [Gav Wood] Please document me + /// Refund for SUICIDE pub suicide_refund_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas for used memory pub memory_gas: usize, - /// TODO [Gav Wood] Please document me + /// Coefficient used to convert memory size to gas price for memory pub quad_coeff_div: usize, - /// TODO [Gav Wood] Please document me + /// Cost for contract length when executing `CREATE` pub create_data_gas: usize, - /// TODO [Gav Wood] Please document me + /// Transaction cost pub tx_gas: usize, - /// TODO [Gav Wood] Please document me + /// `CREATE` transaction cost pub tx_create_gas: usize, - /// TODO [Gav Wood] Please document me + /// Additional cost for empty data transaction pub tx_data_zero_gas: usize, - /// TODO [Gav Wood] Please document me + /// Aditional cost for non-empty data transaction pub tx_data_non_zero_gas: usize, - /// TODO [Gav Wood] Please document me + /// Gas price for copying memory pub copy_gas: usize, } From 
b1282fe1f45ffca0dae5848ea684ef53b28f3dc8 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 11:37:24 +0100 Subject: [PATCH 073/138] api block struct --- src/bin/client/main.rs | 2 ++ src/bin/client/rpc/impls/eth.rs | 6 +++++- src/bin/client/rpc/impls/mod.rs | 6 +++--- src/bin/client/rpc/mod.rs | 3 ++- src/bin/client/rpc/types/block.rs | 32 +++++++++++++++++++++++++++++++ src/bin/client/rpc/types/mod.rs | 1 + 6 files changed, 45 insertions(+), 5 deletions(-) create mode 100644 src/bin/client/rpc/types/block.rs create mode 100644 src/bin/client/rpc/types/mod.rs diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index b88a28d72..53d5b2cd6 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -1,5 +1,7 @@ #![feature(plugin)] #![plugin(docopt_macros)] +// required for serde, move it to a separate library +#![feature(custom_derive, custom_attribute)] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; diff --git a/src/bin/client/rpc/impls/eth.rs b/src/bin/client/rpc/impls/eth.rs index f4d102131..8fed91a14 100644 --- a/src/bin/client/rpc/impls/eth.rs +++ b/src/bin/client/rpc/impls/eth.rs @@ -1,4 +1,4 @@ -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use rustc_serialize::hex::ToHex; use util::hash::*; use ethcore::client::*; @@ -63,6 +63,10 @@ impl Eth for EthClient { fn block_transaction_count(&self, _: Params) -> Result { Ok(Value::U64(0)) } + + fn block(&self, _: Params) -> Result { + Ok(Value::Null) + } } pub struct EthFilterClient { diff --git a/src/bin/client/rpc/impls/mod.rs b/src/bin/client/rpc/impls/mod.rs index 813a168fd..f10d613d0 100644 --- a/src/bin/client/rpc/impls/mod.rs +++ b/src/bin/client/rpc/impls/mod.rs @@ -1,7 +1,7 @@ //! Ethereum rpc interface implementation. -pub mod web3; -pub mod eth; -pub mod net; +mod web3; +mod eth; +mod net; pub use self::web3::Web3Client; pub use self::eth::{EthClient, EthFilterClient}; diff --git a/src/bin/client/rpc/mod.rs b/src/bin/client/rpc/mod.rs index bf18e4b5f..64f9137f0 100644 --- a/src/bin/client/rpc/mod.rs +++ b/src/bin/client/rpc/mod.rs @@ -8,7 +8,8 @@ macro_rules! 
rpcerr { } pub mod traits; -pub mod impls; +mod impls; +mod types; pub use self::traits::{Web3, Eth, EthFilter, Net}; pub use self::impls::*; diff --git a/src/bin/client/rpc/types/block.rs b/src/bin/client/rpc/types/block.rs new file mode 100644 index 000000000..ffb0f8042 --- /dev/null +++ b/src/bin/client/rpc/types/block.rs @@ -0,0 +1,32 @@ +use util::hash::*; +use util::uint::*; + +#[derive(Serialize)] +pub struct Block { + hash: H256, + #[serde(rename="parentHash")] + parent_hash: H256, + #[serde(rename="sha3Uncles")] + uncles_hash: H256, + author: Address, + // TODO: get rid of this one + miner: Address, + #[serde(rename="stateRoot")] + state_root: H256, + #[serde(rename="transactionsRoot")] + transactions_root: H256, + #[serde(rename="receiptsRoot")] + receipts_root: H256, + number: u64, + #[serde(rename="gasUsed")] + gas_used: U256, + #[serde(rename="gasLimit")] + gas_limit: U256, + // TODO: figure out how to properly serialize bytes + //#[serde(rename="extraData")] + //extra_data: Vec, + #[serde(rename="logsBloom")] + logs_bloom: H2048, + timestamp: u64 +} + diff --git a/src/bin/client/rpc/types/mod.rs b/src/bin/client/rpc/types/mod.rs new file mode 100644 index 000000000..fc9210db1 --- /dev/null +++ b/src/bin/client/rpc/types/mod.rs @@ -0,0 +1 @@ +mod block; From 800154a8ae66a941c50a5adacccfe94195831e85 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 14:39:49 +0400 Subject: [PATCH 074/138] client test creating --- src/tests/client.rs | 20 ++++++++++++++++++++ src/tests/mod.rs | 1 + 2 files changed, 21 insertions(+) create mode 100644 src/tests/client.rs diff --git a/src/tests/client.rs b/src/tests/client.rs new file mode 100644 index 000000000..25c8cb9b0 --- /dev/null +++ b/src/tests/client.rs @@ -0,0 +1,20 @@ +use ethereum; +use client::{BlockChainClient,Client}; +use std::env; +use pod_state::*; + + +#[test] +fn test_client_is_created() { + + let mut spec = ethereum::new_frontier_like_test(); + spec.set_genesis_state(PodState::from_json(test.find("pre").unwrap())); + spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); + + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + + let client_result = Client::new(spec, &dir, IOChannel::disconnected()); + + assert!(client_result.is_ok()); +} \ No newline at end of file diff --git a/src/tests/mod.rs b/src/tests/mod.rs index c30f7f9b8..c6e38dbd0 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -4,3 +4,4 @@ mod test_common; mod transaction; mod executive; mod state; +mod client; \ No newline at end of file From d27c7f3902efb5e48635e2b307457c0e6fa20a63 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 11:51:35 +0100 Subject: [PATCH 075/138] rpc block serialize test --- src/bin/client/rpc/types/block.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/bin/client/rpc/types/block.rs b/src/bin/client/rpc/types/block.rs index ffb0f8042..138140f60 100644 --- a/src/bin/client/rpc/types/block.rs +++ b/src/bin/client/rpc/types/block.rs @@ -1,7 +1,7 @@ use util::hash::*; use util::uint::*; -#[derive(Serialize)] +#[derive(Default, Serialize)] pub struct Block { hash: H256, #[serde(rename="parentHash")] @@ -30,3 +30,12 @@ pub struct Block { timestamp: u64 } +#[test] +fn test_block_serialize() { + use serde_json; + + let block = Block::default(); + let serialized = serde_json::to_string(&block).unwrap(); + println!("s: {:?}", serialized); + assert!(false); +} From 46e0a81c52dce6905b3361068d8f63e10524f1d2 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 
26 Jan 2016 14:55:12 +0400 Subject: [PATCH 076/138] more of testing --- res/ethereum/tests | 2 +- src/tests/client.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/res/ethereum/tests b/res/ethereum/tests index dc86e6359..e838fd909 160000 --- a/res/ethereum/tests +++ b/res/ethereum/tests @@ -1 +1 @@ -Subproject commit dc86e6359675440aea59ddb48648a01c799925d8 +Subproject commit e838fd90998fc5502d0b7c9427a4c231f9a6953d diff --git a/src/tests/client.rs b/src/tests/client.rs index 25c8cb9b0..c0f202edd 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -2,6 +2,7 @@ use ethereum; use client::{BlockChainClient,Client}; use std::env; use pod_state::*; +use super::test_common::*; #[test] From 7b86f98a7ccbc131f3d4311f1fb104f1d4bc4a83 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 14:57:43 +0400 Subject: [PATCH 077/138] identations fix --- util/src/rlp/untrusted_rlp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index af0fd6ff7..83ee6b87c 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -331,7 +331,7 @@ impl<'a> Decoder for BasicDecoder<'a> { Some(l @ 0...0x7f) => Ok(try!(f(&[l]))), // 0-55 bytes Some(l @ 0x80...0xb7) => { - let last_index_of = 1 + l as usize - 0x80; + let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { return Err(DecoderError::RlpInconsistentLengthAndData); } From 3ac40b68f862f734871bbc7ca19f9dc840689ea0 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 13:14:22 +0100 Subject: [PATCH 078/138] rpc and bin moved to its own crates --- Cargo.toml | 10 ---------- bin/Cargo.toml | 20 +++++++++++++++++++ {src/bin/client => bin/src}/main.rs | 5 ++--- rpc/Cargo.toml | 19 ++++++++++++++++++ {src/bin/client/rpc => rpc/src}/impls/eth.rs | 4 ++-- {src/bin/client/rpc => rpc/src}/impls/mod.rs | 0 {src/bin/client/rpc => rpc/src}/impls/net.rs | 4 ++-- {src/bin/client/rpc => rpc/src}/impls/web3.rs | 4 ++-- src/bin/client/rpc/mod.rs => rpc/src/lib.rs | 8 +++++++- {src/bin/client/rpc => rpc/src}/traits/eth.rs | 2 +- {src/bin/client/rpc => rpc/src}/traits/mod.rs | 0 {src/bin/client/rpc => rpc/src}/traits/net.rs | 2 +- .../bin/client/rpc => rpc/src}/traits/web3.rs | 2 +- .../bin/client/rpc => rpc/src}/types/block.rs | 8 ++++---- {src/bin/client/rpc => rpc/src}/types/mod.rs | 0 15 files changed, 61 insertions(+), 27 deletions(-) create mode 100644 bin/Cargo.toml rename {src/bin/client => bin/src}/main.rs (98%) create mode 100644 rpc/Cargo.toml rename {src/bin/client/rpc => rpc/src}/impls/eth.rs (97%) rename {src/bin/client/rpc => rpc/src}/impls/mod.rs (100%) rename {src/bin/client/rpc => rpc/src}/impls/net.rs (88%) rename {src/bin/client/rpc => rpc/src}/impls/web3.rs (88%) rename src/bin/client/rpc/mod.rs => rpc/src/lib.rs (80%) rename {src/bin/client/rpc => rpc/src}/traits/eth.rs (99%) rename {src/bin/client/rpc => rpc/src}/traits/mod.rs (100%) rename {src/bin/client/rpc => rpc/src}/traits/net.rs (95%) rename {src/bin/client/rpc => rpc/src}/traits/web3.rs (94%) rename {src/bin/client/rpc => rpc/src}/types/block.rs (84%) rename {src/bin/client/rpc => rpc/src}/types/mod.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 14f12e646..a0ea692b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,18 +21,8 @@ evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" clippy = "0.0.37" -docopt = "0.6" -docopt_macros = "0.6" -ctrlc = "1.0" -jsonrpc-core = { version = "1.0", 
optional = true } -jsonrpc-http-server = { version = "1.0", optional = true } [features] jit = ["evmjit"] evm_debug = [] test-heavy = [] -rpc = ["jsonrpc-core", "jsonrpc-http-server"] - -[[bin]] -name = "client" -path = "src/bin/client/main.rs" diff --git a/bin/Cargo.toml b/bin/Cargo.toml new file mode 100644 index 000000000..ba258b586 --- /dev/null +++ b/bin/Cargo.toml @@ -0,0 +1,20 @@ +[package] +description = "Ethcore client." +name = "ethcore-client" +version = "0.1.0" +license = "GPL-3.0" +authors = ["Ethcore "] + +[dependencies] +log = "0.3" +env_logger = "0.3" +rustc-serialize = "0.3" +docopt = "0.6" +docopt_macros = "0.6" +ctrlc = "1.0" +ethcore-util = { path = "../util" } +ethcore-rpc = { path = "../rpc", optional = true } +ethcore = { path = ".." } + +[features] +rpc = ["ethcore-rpc"] diff --git a/src/bin/client/main.rs b/bin/src/main.rs similarity index 98% rename from src/bin/client/main.rs rename to bin/src/main.rs index 53d5b2cd6..942a5cf24 100644 --- a/src/bin/client/main.rs +++ b/bin/src/main.rs @@ -1,7 +1,6 @@ #![feature(plugin)] #![plugin(docopt_macros)] // required for serde, move it to a separate library -#![feature(custom_derive, custom_attribute)] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -11,7 +10,7 @@ extern crate env_logger; extern crate ctrlc; #[cfg(feature = "rpc")] -mod rpc; +extern crate ethcore_rpc as rpc; use std::env; use log::{LogLevelFilter}; @@ -52,7 +51,7 @@ fn setup_log(init: &String) { #[cfg(feature = "rpc")] fn setup_rpc_server(client: Arc) { - use self::rpc::*; + use rpc::*; let mut server = HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml new file mode 100644 index 000000000..d0f9f50e8 --- /dev/null +++ b/rpc/Cargo.toml @@ -0,0 +1,19 @@ +[package] +description = "Ethcore jsonrpc" +name = "ethcore-rpc" +version = "0.1.0" +license = "GPL-3.0" +authors = ["Marek Kotewicz , diff --git a/src/bin/client/rpc/impls/mod.rs b/rpc/src/impls/mod.rs similarity index 100% rename from src/bin/client/rpc/impls/mod.rs rename to rpc/src/impls/mod.rs diff --git a/src/bin/client/rpc/impls/net.rs b/rpc/src/impls/net.rs similarity index 88% rename from src/bin/client/rpc/impls/net.rs rename to rpc/src/impls/net.rs index f0109429c..a1d36de54 100644 --- a/src/bin/client/rpc/impls/net.rs +++ b/rpc/src/impls/net.rs @@ -1,6 +1,6 @@ //! Net rpc implementation. 
-use rpc::jsonrpc_core::*; -use rpc::Net; +use jsonrpc_core::*; +use traits::Net; pub struct NetClient; diff --git a/src/bin/client/rpc/impls/web3.rs b/rpc/src/impls/web3.rs similarity index 88% rename from src/bin/client/rpc/impls/web3.rs rename to rpc/src/impls/web3.rs index b7d8919e2..58e7858eb 100644 --- a/src/bin/client/rpc/impls/web3.rs +++ b/rpc/src/impls/web3.rs @@ -1,5 +1,5 @@ -use rpc::jsonrpc_core::*; -use rpc::Web3; +use jsonrpc_core::*; +use traits::Web3; pub struct Web3Client; diff --git a/src/bin/client/rpc/mod.rs b/rpc/src/lib.rs similarity index 80% rename from src/bin/client/rpc/mod.rs rename to rpc/src/lib.rs index 64f9137f0..148e9f134 100644 --- a/src/bin/client/rpc/mod.rs +++ b/rpc/src/lib.rs @@ -1,5 +1,12 @@ +#![feature(custom_derive, custom_attribute, plugin)] +#![plugin(serde_macros)] + +extern crate rustc_serialize; +extern crate serde; extern crate jsonrpc_core; extern crate jsonrpc_http_server; +extern crate ethcore_util as util; +extern crate ethcore; use self::jsonrpc_core::{IoHandler, IoDelegate}; @@ -14,7 +21,6 @@ mod types; pub use self::traits::{Web3, Eth, EthFilter, Net}; pub use self::impls::*; - pub struct HttpServer { handler: IoHandler, threads: usize diff --git a/src/bin/client/rpc/traits/eth.rs b/rpc/src/traits/eth.rs similarity index 99% rename from src/bin/client/rpc/traits/eth.rs rename to rpc/src/traits/eth.rs index 856111444..31e9df164 100644 --- a/src/bin/client/rpc/traits/eth.rs +++ b/rpc/src/traits/eth.rs @@ -1,6 +1,6 @@ //! Eth rpc interface. use std::sync::Arc; -use rpc::jsonrpc_core::*; +use jsonrpc_core::*; /// Eth rpc interface. pub trait Eth: Sized + Send + Sync + 'static { diff --git a/src/bin/client/rpc/traits/mod.rs b/rpc/src/traits/mod.rs similarity index 100% rename from src/bin/client/rpc/traits/mod.rs rename to rpc/src/traits/mod.rs diff --git a/src/bin/client/rpc/traits/net.rs b/rpc/src/traits/net.rs similarity index 95% rename from src/bin/client/rpc/traits/net.rs rename to rpc/src/traits/net.rs index 63c64edb3..4df8d7114 100644 --- a/src/bin/client/rpc/traits/net.rs +++ b/rpc/src/traits/net.rs @@ -1,6 +1,6 @@ //! Net rpc interface. use std::sync::Arc; -use rpc::jsonrpc_core::*; +use jsonrpc_core::*; /// Net rpc interface. pub trait Net: Sized + Send + Sync + 'static { diff --git a/src/bin/client/rpc/traits/web3.rs b/rpc/src/traits/web3.rs similarity index 94% rename from src/bin/client/rpc/traits/web3.rs rename to rpc/src/traits/web3.rs index b71c867aa..8e73d4304 100644 --- a/src/bin/client/rpc/traits/web3.rs +++ b/rpc/src/traits/web3.rs @@ -1,6 +1,6 @@ //! Web3 rpc interface. use std::sync::Arc; -use rpc::jsonrpc_core::*; +use jsonrpc_core::*; /// Web3 rpc interface. 
pub trait Web3: Sized + Send + Sync + 'static { diff --git a/src/bin/client/rpc/types/block.rs b/rpc/src/types/block.rs similarity index 84% rename from src/bin/client/rpc/types/block.rs rename to rpc/src/types/block.rs index 138140f60..c15c8186d 100644 --- a/src/bin/client/rpc/types/block.rs +++ b/rpc/src/types/block.rs @@ -1,7 +1,7 @@ use util::hash::*; use util::uint::*; -#[derive(Default, Serialize)] +#[derive(Default)] pub struct Block { hash: H256, #[serde(rename="parentHash")] @@ -35,7 +35,7 @@ fn test_block_serialize() { use serde_json; let block = Block::default(); - let serialized = serde_json::to_string(&block).unwrap(); - println!("s: {:?}", serialized); - assert!(false); + //let serialized = serde_json::to_string(&block).unwrap(); + //println!("s: {:?}", serialized); + //assert!(false); } diff --git a/src/bin/client/rpc/types/mod.rs b/rpc/src/types/mod.rs similarity index 100% rename from src/bin/client/rpc/types/mod.rs rename to rpc/src/types/mod.rs From 81bffd48e89be4d4d2745f69f00c006743fdd2ab Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 17:33:22 +0400 Subject: [PATCH 079/138] basic client tests working --- src/block.rs | 2 +- src/block_queue.rs | 2 +- src/tests/client.rs | 92 ++++++++++++++++++++++++++++++++++++++------- 3 files changed, 80 insertions(+), 16 deletions(-) diff --git a/src/block.rs b/src/block.rs index 1ff326430..0e928e9b0 100644 --- a/src/block.rs +++ b/src/block.rs @@ -85,7 +85,7 @@ impl IsBlock for Block { /// /// It's a bit like a Vec, eccept that whenever a transaction is pushed, we execute it and /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. -pub struct OpenBlock<'x, 'y> { + pub struct OpenBlock<'x, 'y> { block: Block, engine: &'x Engine, last_hashes: &'y LastHashes, diff --git a/src/block_queue.rs b/src/block_queue.rs index fa091d0c4..c45a86a8b 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -191,7 +191,7 @@ impl BlockQueue { /// Wait for queue to be empty pub fn flush(&mut self) { let mut verification = self.verification.lock().unwrap(); - while !verification.unverified.is_empty() && !verification.verifying.is_empty() { + while !verification.unverified.is_empty() || !verification.verifying.is_empty() { verification = self.empty.wait(verification).unwrap(); } } diff --git a/src/tests/client.rs b/src/tests/client.rs index c0f202edd..c14f2e2f0 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -1,21 +1,85 @@ -use ethereum; -use client::{BlockChainClient,Client}; + use client::{BlockChainClient,Client}; use std::env; -use pod_state::*; use super::test_common::*; +use std::path::PathBuf; +use spec::*; + +#[cfg(test)] +fn get_random_temp_dir() -> PathBuf { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + dir +} + +#[cfg(test)] +fn get_test_spec() -> Spec { + Spec::new_test() +} + +#[cfg(test)] +fn get_good_dummy_block() -> Bytes { + let mut block_header = Header::new(); + let test_spec = get_test_spec(); + let test_engine = test_spec.to_engine().unwrap(); + block_header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap()); + block_header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap()); + block_header.timestamp = 40; + block_header.number = 1; + block_header.parent_hash = test_engine.spec().genesis_header().hash(); + block_header.state_root = test_engine.spec().genesis_header().state_root; + + create_test_block(&block_header) +} + +#[cfg(test)] +fn 
create_test_block(header: &Header) -> Bytes { + let mut rlp = RlpStream::new_list(3); + rlp.append(header); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.out() +} #[test] -fn test_client_is_created() { - - let mut spec = ethereum::new_frontier_like_test(); - spec.set_genesis_state(PodState::from_json(test.find("pre").unwrap())); - spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); - - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - - let client_result = Client::new(spec, &dir, IOChannel::disconnected()); - +fn created() { + let client_result = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()); assert!(client_result.is_ok()); +} + +#[test] +fn imports_from_empty() { + let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + client.import_verified_blocks(&IoChannel::disconnected()); + client.flush_queue(); +} + +#[test] +fn imports_good_block() { + + ::env_logger::init().ok(); + + let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + + let good_block = get_good_dummy_block(); + if let Err(_) = client.import_block(good_block) { + panic!("error importing block being good by definition"); + } + + client.flush_queue(); + + client.import_verified_blocks(&IoChannel::disconnected()); + + let block = client.block_header_at(1).unwrap(); + + assert!(!block.is_empty()); +} + +#[test] +fn query_none_block() { + let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + + let non_existant = client.block_header_at(188); + + assert!(non_existant == Option::None); } \ No newline at end of file From 8d30f9fecbd91e132ccf368d4b6eb35ed403718f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 17:55:08 +0400 Subject: [PATCH 080/138] bad block import checks --- src/tests/client.rs | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index c14f2e2f0..a9d212bc1 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -31,6 +31,21 @@ fn get_good_dummy_block() -> Bytes { create_test_block(&block_header) } +#[cfg(test)] +fn get_bad_state_dummy_block() -> Bytes { + let mut block_header = Header::new(); + let test_spec = get_test_spec(); + let test_engine = test_spec.to_engine().unwrap(); + block_header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap()); + block_header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap()); + block_header.timestamp = 40; + block_header.number = 1; + block_header.parent_hash = test_engine.spec().genesis_header().hash(); + block_header.state_root = x!(0xbad); + + create_test_block(&block_header) +} + #[cfg(test)] fn create_test_block(header: &Header) -> Bytes { let mut rlp = RlpStream::new_list(3); @@ -40,6 +55,23 @@ fn create_test_block(header: &Header) -> Bytes { rlp.out() } +#[cfg(test)] +fn get_test_client_with_blocks(blocks: Vec) -> Arc { + let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + + for block in &blocks { + if let Err(_) = client.import_block(block.clone()) { + panic!("panic importing block which is well-formed"); + } + } + + client.flush_queue(); + + client.import_verified_blocks(&IoChannel::disconnected()); + + client +} + #[test] fn created() { @@ -81,5 +113,13 @@ fn query_none_block() { 
let non_existant = client.block_header_at(188); - assert!(non_existant == Option::None); + assert!(non_existant.is_none()); +} + +#[test] +fn query_bad_block() { + let client = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]); + let bad_block:Option = client.block_header_at(1); + + assert!(bad_block.is_none()); } \ No newline at end of file From 387e3ec3fd38df75a23997b2190cf138ccb0ad84 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 15:00:22 +0100 Subject: [PATCH 081/138] Ensure Spec::ensure_db_good() places DB entries for code & storage. --- src/account.rs | 6 +++++- src/blockchain.rs | 5 +++++ src/client.rs | 17 ++++++++++++++++- src/evm/interpreter.rs | 3 +++ src/executive.rs | 11 +++++------ src/externalities.rs | 1 + src/pod_account.rs | 14 +++++++++++++- src/spec.rs | 5 +++-- src/state.rs | 1 + src/tests/chain.rs | 5 +++-- util/src/trie/sectriedbmut.rs | 6 ++++++ util/src/trie/triedbmut.rs | 5 +++++ 12 files changed, 66 insertions(+), 13 deletions(-) diff --git a/src/account.rs b/src/account.rs index b0fbf3f85..409637d6f 100644 --- a/src/account.rs +++ b/src/account.rs @@ -149,11 +149,15 @@ impl Account { /// Provide a database to lookup `code_hash`. Should not be called if it is a contract without code. pub fn cache_code(&mut self, db: &HashDB) -> bool { // TODO: fill out self.code_cache; + trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.is_cached() || match self.code_hash { Some(ref h) => match db.lookup(h) { Some(x) => { self.code_cache = x.to_vec(); true }, - _ => false, + _ => { + warn!("Failed reverse lookup of {}", h); + false + }, }, _ => false, } diff --git a/src/blockchain.rs b/src/blockchain.rs index 3bd31688a..da9ee04c2 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -107,6 +107,11 @@ pub trait BlockProvider { fn genesis_hash(&self) -> H256 { self.block_hash(0).expect("Genesis hash should always exist") } + + /// Returns the header of the genesis block. + fn genesis_header(&self) -> Header { + self.block_header(&self.genesis_hash()).unwrap() + } } #[derive(Debug, Hash, Eq, PartialEq, Clone)] diff --git a/src/client.rs b/src/client.rs index 2cc6c150d..91ccf4b95 100644 --- a/src/client.rs +++ b/src/client.rs @@ -4,8 +4,10 @@ use blockchain::{BlockChain, BlockProvider, CacheSize}; use views::BlockView; use error::*; use header::BlockNumber; +use state::State; use spec::Spec; use engine::Engine; +use views::HeaderView; use block_queue::{BlockQueue, BlockQueueInfo}; use service::NetSyncMessage; use env_info::LastHashes; @@ -98,6 +100,11 @@ pub trait BlockChainClient : Sync + Send { /// Get blockchain information. fn chain_info(&self) -> BlockChainInfo; + + /// Get the best block header. + fn best_block_header(&self) -> Bytes { + self.block_header(&self.chain_info().best_block_hash).unwrap() + } } #[derive(Default, Clone, Debug, Eq, PartialEq)] @@ -137,7 +144,9 @@ const HISTORY: u64 = 1000; impl Client { /// Create a new client with given spec and DB path. 
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result, Error> { - let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); + let gb = spec.genesis_block(); + flushln!("Spec says genesis block is {}", gb.pretty()); + let chain = Arc::new(RwLock::new(BlockChain::new(&gb, path))); let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); @@ -168,6 +177,7 @@ impl Client { if engine.spec().ensure_db_good(&mut state_db) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } + flushln!("Client::new: commiting. Best root now: {}. contains: {}", chain.read().unwrap().genesis_header().state_root, state_db.contains(&chain.read().unwrap().genesis_header().state_root)); Ok(Arc::new(Client { chain: chain, engine: engine.clone(), @@ -261,6 +271,11 @@ impl Client { self.uncommited_states.write().unwrap().remove(hash); } + /// Get a copy of the best block's state. + pub fn state(&self) -> State { + State::from_existing(self.state_db.clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) + } + /// Get info on the cache. pub fn cache_info(&self) -> CacheSize { self.chain.read().unwrap().cache_size() diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 7657b1bbe..2f43b5199 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -275,6 +275,8 @@ impl evm::Evm for Interpreter { code: &code }; + flushln!("Executing: {:?}", params); + while reader.position < code.len() { let instruction = code[reader.position]; reader.position += 1; @@ -640,6 +642,7 @@ impl Interpreter { return Ok(InstructionResult::StopExecution); }, instructions::SUICIDE => { + flushln!("SUICIDE!"); let address = stack.pop_back(); ext.suicide(&u256_to_address(&address)); return Ok(InstructionResult::StopExecution); diff --git a/src/executive.rs b/src/executive.rs index f5952e530..9c3a669cf 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -166,7 +166,6 @@ impl<'a> Executive<'a> { /// Modifies the substate and the output. /// Returns either gas_left or `evm::Error`. pub fn call(&mut self, params: ActionParams, substate: &mut Substate, mut output: BytesRef) -> evm::Result { - println!("Calling executive. 
Sender: {}", params.sender); // backup used in case of running out of gas let backup = self.state.clone(); @@ -174,7 +173,7 @@ impl<'a> Executive<'a> { if let ActionValue::Transfer(val) = params.value { self.state.transfer_balance(¶ms.sender, ¶ms.address, &val); } - trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); + flushln!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); if self.engine.is_builtin(¶ms.code_address) { // if destination is builtin, try to execute it @@ -261,17 +260,17 @@ impl<'a> Executive<'a> { let refund_value = gas_left * t.gas_price; let fees_value = gas_used * t.gas_price; - trace!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", + flushln!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", t.gas, sstore_refunds, suicide_refunds, refunds_bound, gas_left_prerefund, refunded, gas_left, gas_used, refund_value, fees_value); - trace!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, t.sender().unwrap()); + flushln!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, t.sender().unwrap()); self.state.add_balance(&t.sender().unwrap(), &refund_value); - trace!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); + flushln!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); self.state.add_balance(&self.info.author, &fees_value); // perform suicides for address in &substate.suicides { - trace!("Killing {}", address); + flushln!("Killing {}", address); self.state.kill_account(address); } diff --git a/src/externalities.rs b/src/externalities.rs index f9a79c3c0..e7a73d553 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -215,6 +215,7 @@ impl<'a> Ext for Externalities<'a> { fn suicide(&mut self, refund_address: &Address) { let address = self.origin_info.address.clone(); let balance = self.balance(&address); + flushln!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); self.state.transfer_balance(&address, refund_address, &balance); self.substate.suicides.insert(address); } diff --git a/src/pod_account.rs b/src/pod_account.rs index 81b8b1c44..979658202 100644 --- a/src/pod_account.rs +++ b/src/pod_account.rs @@ -31,7 +31,7 @@ impl PodAccount { } } - /// TODO [Gav Wood] Please document me + /// Returns the RLP for this account. pub fn rlp(&self) -> Bytes { let mut stream = RlpStream::new_list(4); stream.append(&self.nonce); @@ -40,6 +40,18 @@ impl PodAccount { stream.append(&self.code.sha3()); stream.out() } + + /// Place additional data into given hash DB. + pub fn insert_additional(&self, db: &mut HashDB) { + if !self.code.is_empty() { + db.insert(&self.code); + } + let mut r = H256::new(); + let mut t = SecTrieDBMut::new(db, &mut r); + for (k, v) in &self.storage { + t.insert(k, &encode(&U256::from(v.as_slice()))); + } + } } impl fmt::Display for PodAccount { diff --git a/src/spec.rs b/src/spec.rs index 326552524..f4166eab2 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -261,7 +261,6 @@ impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. 
pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { if !db.contains(&self.state_root()) { - info!("Populating genesis state..."); let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); @@ -269,8 +268,10 @@ impl Spec { t.insert(address.as_slice(), &account.rlp()); } } + for (_, account) in self.genesis_state.get().iter() { + account.insert_additional(db); + } assert!(db.contains(&self.state_root())); - info!("Genesis state is ready"); true } else { false } } diff --git a/src/state.rs b/src/state.rs index 7310f63a7..238638617 100644 --- a/src/state.rs +++ b/src/state.rs @@ -103,6 +103,7 @@ impl State { /// Mutate storage of account `a` so that it is `value` for `key`. pub fn code(&self, a: &Address) -> Option { + flushln!("Getting code at {}", a); self.get(a, true).as_ref().map_or(None, |a|a.code().map(|x|x.to_vec())) } diff --git a/src/tests/chain.rs b/src/tests/chain.rs index ba42ae935..046c16052 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -22,7 +22,8 @@ fn do_json_test(json_data: &[u8]) -> Vec { let blocks: Vec = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect(); let mut spec = ethereum::new_frontier_like_test(); - spec.set_genesis_state(PodState::from_json(test.find("pre").unwrap())); + let s = PodState::from_json(test.find("pre").unwrap()); + spec.set_genesis_state(s); spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); assert!(spec.is_state_root_valid()); @@ -56,7 +57,7 @@ declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHea declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} -declare_test!{ignore => BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) +declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS diff --git a/util/src/trie/sectriedbmut.rs b/util/src/trie/sectriedbmut.rs index 99c2e2bb1..7f8e292f0 100644 --- a/util/src/trie/sectriedbmut.rs +++ b/util/src/trie/sectriedbmut.rs @@ -25,6 +25,12 @@ impl<'db> SecTrieDBMut<'db> { pub fn from_existing(db: &'db mut HashDB, root: &'db mut H256) -> Self { SecTrieDBMut { raw: TrieDBMut::from_existing(db, root) } } + + /// Get the backing database. + pub fn db(&'db self) -> &'db HashDB { self.raw.db() } + + /// Get the backing database. + pub fn db_mut(&'db mut self) -> &'db mut HashDB { self.raw.db_mut() } } impl<'db> Trie for SecTrieDBMut<'db> { diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 48749bf0d..17d3f5866 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -87,6 +87,11 @@ impl<'db> TrieDBMut<'db> { self.db } + /// Get the backing database. + pub fn db_mut(&'db mut self) -> &'db mut HashDB { + self.db + } + /// Determine all the keys in the backing database that belong to the trie. 
pub fn keys(&self) -> Vec { let mut ret: Vec = Vec::new(); From 50d1038cc58db35417fe10284e57f73bb5db0a75 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 15:02:14 +0100 Subject: [PATCH 082/138] Remove flushln!s. --- src/client.rs | 3 +-- src/evm/interpreter.rs | 3 --- src/executive.rs | 9 ++++----- src/externalities.rs | 2 +- src/state.rs | 1 - 5 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/client.rs b/src/client.rs index 91ccf4b95..59568584b 100644 --- a/src/client.rs +++ b/src/client.rs @@ -145,7 +145,6 @@ impl Client { /// Create a new client with given spec and DB path. pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result, Error> { let gb = spec.genesis_block(); - flushln!("Spec says genesis block is {}", gb.pretty()); let chain = Arc::new(RwLock::new(BlockChain::new(&gb, path))); let mut opts = Options::new(); opts.set_max_open_files(256); @@ -177,7 +176,7 @@ impl Client { if engine.spec().ensure_db_good(&mut state_db) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } - flushln!("Client::new: commiting. Best root now: {}. contains: {}", chain.read().unwrap().genesis_header().state_root, state_db.contains(&chain.read().unwrap().genesis_header().state_root)); + trace!("Client::new: commiting. Best root now: {}. contains: {}", chain.read().unwrap().genesis_header().state_root, state_db.contains(&chain.read().unwrap().genesis_header().state_root)); Ok(Arc::new(Client { chain: chain, engine: engine.clone(), diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index 2f43b5199..7657b1bbe 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -275,8 +275,6 @@ impl evm::Evm for Interpreter { code: &code }; - flushln!("Executing: {:?}", params); - while reader.position < code.len() { let instruction = code[reader.position]; reader.position += 1; @@ -642,7 +640,6 @@ impl Interpreter { return Ok(InstructionResult::StopExecution); }, instructions::SUICIDE => { - flushln!("SUICIDE!"); let address = stack.pop_back(); ext.suicide(&u256_to_address(&address)); return Ok(InstructionResult::StopExecution); diff --git a/src/executive.rs b/src/executive.rs index 9c3a669cf..07ab4c4c5 100644 --- a/src/executive.rs +++ b/src/executive.rs @@ -173,7 +173,7 @@ impl<'a> Executive<'a> { if let ActionValue::Transfer(val) = params.value { self.state.transfer_balance(¶ms.sender, ¶ms.address, &val); } - flushln!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); + trace!("Executive::call(params={:?}) self.env_info={:?}", params, self.info); if self.engine.is_builtin(¶ms.code_address) { // if destination is builtin, try to execute it @@ -260,17 +260,16 @@ impl<'a> Executive<'a> { let refund_value = gas_left * t.gas_price; let fees_value = gas_used * t.gas_price; - flushln!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", + trace!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", t.gas, sstore_refunds, suicide_refunds, refunds_bound, gas_left_prerefund, refunded, gas_left, gas_used, refund_value, fees_value); - flushln!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, t.sender().unwrap()); + trace!("exec::finalize: Refunding refund_value={}, sender={}\n", refund_value, 
t.sender().unwrap()); self.state.add_balance(&t.sender().unwrap(), &refund_value); - flushln!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); + trace!("exec::finalize: Compensating author: fees_value={}, author={}\n", fees_value, &self.info.author); self.state.add_balance(&self.info.author, &fees_value); // perform suicides for address in &substate.suicides { - flushln!("Killing {}", address); self.state.kill_account(address); } diff --git a/src/externalities.rs b/src/externalities.rs index e7a73d553..f1b8c1958 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -215,7 +215,7 @@ impl<'a> Ext for Externalities<'a> { fn suicide(&mut self, refund_address: &Address) { let address = self.origin_info.address.clone(); let balance = self.balance(&address); - flushln!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); + trace!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); self.state.transfer_balance(&address, refund_address, &balance); self.substate.suicides.insert(address); } diff --git a/src/state.rs b/src/state.rs index 238638617..7310f63a7 100644 --- a/src/state.rs +++ b/src/state.rs @@ -103,7 +103,6 @@ impl State { /// Mutate storage of account `a` so that it is `value` for `key`. pub fn code(&self, a: &Address) -> Option { - flushln!("Getting code at {}", a); self.get(a, true).as_ref().map_or(None, |a|a.code().map(|x|x.to_vec())) } From 253f56afe1f5928f76b1e0efcbe75193f74f0021 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 18:03:12 +0400 Subject: [PATCH 083/138] hack indent --- src/block.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/block.rs b/src/block.rs index 0e928e9b0..31378e8a6 100644 --- a/src/block.rs +++ b/src/block.rs @@ -108,6 +108,7 @@ pub struct SealedBlock { uncle_bytes: Bytes, } + impl<'x, 'y> OpenBlock<'x, 'y> { /// Create a new OpenBlock ready for transaction pushing. pub fn new<'a, 'b>(engine: &'a Engine, db: JournalDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { From f0da7bde2bcc82bc4b0ffd5310d2249e68f2262a Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 15:33:24 +0100 Subject: [PATCH 084/138] Fix StateTests. 
Closes #214 --- src/block_queue.rs | 4 +++- src/client.rs | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index fa091d0c4..e0868c011 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -158,6 +158,7 @@ impl BlockQueue { }, Err(err) => { let mut v = verification.lock().unwrap(); + flushln!("Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); v.bad.insert(block_hash.clone()); v.verifying.retain(|e| e.hash != block_hash); @@ -191,7 +192,7 @@ impl BlockQueue { /// Wait for queue to be empty pub fn flush(&mut self) { let mut verification = self.verification.lock().unwrap(); - while !verification.unverified.is_empty() && !verification.verifying.is_empty() { + while !verification.unverified.is_empty() || !verification.verifying.is_empty() { verification = self.empty.wait(verification).unwrap(); } } @@ -221,6 +222,7 @@ impl BlockQueue { self.more_to_verify.notify_all(); }, Err(err) => { + flushln!("Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); self.verification.lock().unwrap().bad.insert(header.hash()); } diff --git a/src/client.rs b/src/client.rs index 59568584b..453f4ab64 100644 --- a/src/client.rs +++ b/src/client.rs @@ -199,6 +199,7 @@ impl Client { let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); for block in blocks { + flushln!("Importing {}...", block.header.hash()); if bad.contains(&block.header.parent_hash) { self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); @@ -207,6 +208,7 @@ impl Client { let header = &block.header; if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { + flushln!("Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); @@ -215,6 +217,7 @@ impl Client { let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, None => { + flushln!("Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); @@ -238,8 +241,9 @@ impl Client { let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { + flushln!("Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - bad.insert(block.header.hash()); + bad.insert(block.header.hash()); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; } From 9da88e99f84c6d3d73213f302496f43af5bec971 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 15:37:14 +0100 Subject: [PATCH 085/138] Disable 
flushln for now. --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 453f4ab64..9529e356c 100644 --- a/src/client.rs +++ b/src/client.rs @@ -199,7 +199,7 @@ impl Client { let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); for block in blocks { - flushln!("Importing {}...", block.header.hash()); +// flushln!("Importing {}...", block.header.hash()); if bad.contains(&block.header.parent_hash) { self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); From 0e5d9ee2a00a2afd20e9a41086078ef36cd3ffdf Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 15:39:49 +0100 Subject: [PATCH 086/138] Closes #215 --- src/tests/chain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 046c16052..e7b1759f2 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -48,7 +48,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } -declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // FAILS +declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // FAILS @@ -57,7 +57,7 @@ declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHea declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} -declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed) +declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS From 18f1d5e8514f38c337b2d6d3771147234172cf79 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 17:16:06 +0100 Subject: [PATCH 087/138] Address issue on PR. --- src/client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 9529e356c..a83ff554e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -176,7 +176,6 @@ impl Client { if engine.spec().ensure_db_good(&mut state_db) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } - trace!("Client::new: commiting. Best root now: {}. contains: {}", chain.read().unwrap().genesis_header().state_root, state_db.contains(&chain.read().unwrap().genesis_header().state_root)); Ok(Arc::new(Client { chain: chain, engine: engine.clone(), From 8a665fe313870015efd484e57d909730ebfc333e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 18:02:49 +0100 Subject: [PATCH 088/138] Tests check block format. 
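(Sketch of the well-formedness filter this patch applies before importing test blocks; the calls are the ones used in the diff below, while the combined boolean form and the explicit `Header`/`Transaction` type parameters are reconstructions:)

    let urlp = UntrustedRlp::new(&b);
    let well_formed = urlp.is_list() && urlp.item_count() == 3 && urlp.size() == b.len()
        && urlp.val_at::<Header>(0).is_ok()
        && urlp.at(1).unwrap().is_list()
        && urlp.at(1).unwrap().iter().all(|i| i.as_val::<Transaction>().is_ok())
        && urlp.at(2).unwrap().is_list()
        && urlp.at(2).unwrap().iter().all(|i| i.as_val::<Header>().is_ok());
    if !well_formed { continue; } // skip undecodable fixture blocks instead of unwrap()ing import_block on them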
--- src/ethereum/ethash.rs | 4 ++++ src/tests/chain.rs | 33 +++++++++++++++++++++++---------- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index aebee1e92..057a67d20 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -99,6 +99,10 @@ impl Engine for Ethash { } fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { + // check the seal fields. + try!(UntrustedRlp::new(&header.seal[0]).as_val::()); + try!(UntrustedRlp::new(&header.seal[1]).as_val::()); + let min_difficulty = decode(self.spec().engine_params.get("minimumDifficulty").unwrap()); if header.difficulty < min_difficulty { return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: min_difficulty, found: header.difficulty }))) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index e7b1759f2..8be1b1c92 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -31,9 +31,22 @@ fn do_json_test(json_data: &[u8]) -> Vec { dir.push(H32::random().hex()); { let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); - blocks.into_iter().foreach(|b| { + for b in blocks.into_iter() { + { + let urlp = UntrustedRlp::new(&b); + if !urlp.is_list() || urlp.item_count() != 3 || urlp.size() != b.len() { continue; } + if urlp.val_at::
<Header>(0).is_err() { continue; } + if !urlp.at(1).unwrap().is_list() { continue; } + if urlp.at(1).unwrap().iter().find(|i| i.as_val::<Transaction>().is_err()).is_some() { + continue; + } + if !urlp.at(2).unwrap().is_list() { continue; } + if urlp.at(2).unwrap().iter().find(|i| i.as_val::<Header>
().is_err()).is_some() { + continue; + } + } client.import_block(b).unwrap(); - }); + } client.flush_queue(); client.import_verified_blocks(&IoChannel::disconnected()); fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); @@ -50,16 +63,16 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} -declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS -declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // FAILS +declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // STILL FAILS +declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // STILL FAILS declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} -declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS -declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS +declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS +declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} -declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS -declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS -declare_test!{ignore => BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // FAILS -declare_test!{ignore => BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // FAILS +declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS +declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS +declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // FAILS +declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // FAILS From b8c2f9cf15c77257384833e29bdeef9ec809dc61 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 21:05:25 +0400 Subject: [PATCH 089/138] chain info test --- src/tests/client.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index a9d212bc1..f6641ff91 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -1,4 +1,4 @@ - use client::{BlockChainClient,Client}; +use client::{BlockChainClient,Client}; use std::env; use super::test_common::*; use std::path::PathBuf; @@ -88,9 +88,6 @@ fn imports_from_empty() { #[test] fn imports_good_block() { - - ::env_logger::init().ok(); - let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); let good_block = get_good_dummy_block(); @@ -122,4 +119,13 @@ fn query_bad_block() { let bad_block:Option = client.block_header_at(1); assert!(bad_block.is_none()); +} + +#[test] +fn returns_chain_info() { + let dummy_block = get_good_dummy_block(); + let client = 
get_test_client_with_blocks(vec![dummy_block.clone()]); + let block = BlockView::new(&dummy_block); + let info = client.chain_info(); + assert_eq!(info.best_block_hash, block.header().hash()); } \ No newline at end of file From cd57e480f51072f6920d2fe0431989ae531c7144 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 21:12:25 +0400 Subject: [PATCH 090/138] fix indentation again --- src/block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/block.rs b/src/block.rs index 31378e8a6..5691d2bf5 100644 --- a/src/block.rs +++ b/src/block.rs @@ -85,7 +85,7 @@ impl IsBlock for Block { /// /// It's a bit like a Vec, eccept that whenever a transaction is pushed, we execute it and /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. - pub struct OpenBlock<'x, 'y> { +pub struct OpenBlock<'x, 'y> { block: Block, engine: &'x Engine, last_hashes: &'y LastHashes, From f07fc497c5b0516137617d0cf40bc446293e032a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 21:14:06 +0400 Subject: [PATCH 091/138] cleanup --- src/block.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/block.rs b/src/block.rs index 5691d2bf5..1ff326430 100644 --- a/src/block.rs +++ b/src/block.rs @@ -108,7 +108,6 @@ pub struct SealedBlock { uncle_bytes: Bytes, } - impl<'x, 'y> OpenBlock<'x, 'y> { /// Create a new OpenBlock ready for transaction pushing. pub fn new<'a, 'b>(engine: &'a Engine, db: JournalDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { From 91828065f45cf1a543218151397f24f4707859d3 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 26 Jan 2016 22:06:11 +0400 Subject: [PATCH 092/138] fixed line spaces --- src/tests/client.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index f6641ff91..eb58699b4 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -58,17 +58,13 @@ fn create_test_block(header: &Header) -> Bytes { #[cfg(test)] fn get_test_client_with_blocks(blocks: Vec) -> Arc { let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); - for block in &blocks { if let Err(_) = client.import_block(block.clone()) { panic!("panic importing block which is well-formed"); } } - client.flush_queue(); - client.import_verified_blocks(&IoChannel::disconnected()); - client } @@ -89,18 +85,14 @@ fn imports_from_empty() { #[test] fn imports_good_block() { let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); - let good_block = get_good_dummy_block(); if let Err(_) = client.import_block(good_block) { panic!("error importing block being good by definition"); } - client.flush_queue(); - client.import_verified_blocks(&IoChannel::disconnected()); let block = client.block_header_at(1).unwrap(); - assert!(!block.is_empty()); } @@ -109,7 +101,6 @@ fn query_none_block() { let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); let non_existant = client.block_header_at(188); - assert!(non_existant.is_none()); } From e904d2145f05d8ca56ee9fd7c6b3411b5f6f2218 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 26 Jan 2016 19:18:22 +0100 Subject: [PATCH 093/138] Block refactoring, stricter RLP. Fixed #234. Partially fixes #233 for Blocks. Fixed #222. 
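(A minimal sketch of the decode path introduced below, assuming the crate's usual `util` rlp imports are in scope; `Block::is_good`, `as_val::<Block>()` and `DecoderError::RlpIsTooBig` come from the diff, the wrapper function itself is illustrative:)

    /// Decode a chain-format block: only an exact three-item
    /// [header, transactions, uncles] list with no trailing bytes is accepted.
    fn decode_block(bytes: &[u8]) -> Option<Block> {
        // `Block::is_good(bytes)` is the boolean form of the same check;
        // trailing bytes surface as DecoderError::RlpIsTooBig.
        UntrustedRlp::new(bytes).as_val::<Block>().ok()
    }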
--- src/block.rs | 199 +++++++++++++++++++++------------- src/engine.rs | 6 +- src/ethereum/ethash.rs | 2 +- src/header.rs | 2 +- src/log_entry.rs | 2 +- src/receipt.rs | 2 +- src/tests/chain.rs | 16 +-- src/transaction.rs | 8 +- util/src/bytes.rs | 11 +- util/src/rlp/rlperrors.rs | 2 + util/src/rlp/untrusted_rlp.rs | 3 + 11 files changed, 151 insertions(+), 102 deletions(-) diff --git a/src/block.rs b/src/block.rs index 1ff326430..83780ab43 100644 --- a/src/block.rs +++ b/src/block.rs @@ -5,80 +5,124 @@ use engine::*; use state::*; use verification::PreVerifiedBlock; -/// A transaction/receipt execution entry. -pub struct Entry { - transaction: Transaction, - receipt: Receipt, +/// A block, encoded as it is on the block chain. +// TODO: rename to Block +#[derive(Default, Debug, Clone)] +pub struct Block { + /// The header of this block. + pub header: Header, + /// The transactions in this block. + pub transactions: Vec, + /// The uncles of this block. + pub uncles: Vec
, +} + +impl Block { + /// Returns true iff the given bytes form a valid encoding of a block in RLP. + // TODO: implement Decoder for this and have this use that. + pub fn is_good(b: &[u8]) -> bool { + /* + let urlp = UntrustedRlp::new(&b); + if !urlp.is_list() || urlp.item_count() != 3 || urlp.size() != b.len() { return false; } + if urlp.val_at::
<Header>(0).is_err() { return false; } + + if !urlp.at(1).unwrap().is_list() { return false; } + if urlp.at(1).unwrap().iter().find(|i| i.as_val::<Transaction>().is_err()).is_some() { + return false; + } + + if !urlp.at(2).unwrap().is_list() { return false; } + if urlp.at(2).unwrap().iter().find(|i| i.as_val::<Header>
().is_err()).is_some() { + return false; + } + true*/ + UntrustedRlp::new(b).as_val::().is_ok() + } +} + +impl Decodable for Block { + fn decode(decoder: &D) -> Result where D: Decoder { + if decoder.as_raw().len() != try!(decoder.as_rlp().payload_info()).total() { + return Err(DecoderError::RlpIsTooBig); + } + let d = try!(decoder.as_list()); + if d.len() != 3 { + return Err(DecoderError::RlpIncorrectListLen); + } + Ok(Block { + header: try!(Decodable::decode(&d[0])), + transactions: try!(Decodable::decode(&d[1])), + uncles: try!(Decodable::decode(&d[2])), + }) + } } /// Internal type for a block's common elements. -pub struct Block { - header: Header, +// TODO: rename to ExecutedBlock +// TODO: use BareBlock +#[derive(Debug, Clone)] +pub struct ExecutedBlock { + base: Block, - /// State is the most final state in the block. + receipts: Vec, + transactions_set: HashSet, state: State, - - archive: Vec, - archive_set: HashSet, - - uncles: Vec
, } -/// A set of references to `Block` fields that are publicly accessible. +/// A set of references to `ExecutedBlock` fields that are publicly accessible. pub struct BlockRefMut<'a> { /// TODO [Gav Wood] Please document me pub header: &'a Header, /// TODO [Gav Wood] Please document me - pub state: &'a mut State, - /// TODO [Gav Wood] Please document me - pub archive: &'a Vec, + pub transactions: &'a Vec, /// TODO [Gav Wood] Please document me pub uncles: &'a Vec
, + + /// TODO [Gav Wood] Please document me + pub receipts: &'a Vec, + /// TODO [Gav Wood] Please document me + pub state: &'a mut State, } -impl Block { +impl ExecutedBlock { /// Create a new block from the given `state`. - fn new(state: State) -> Block { - Block { - header: Header::new(), - state: state, - archive: Vec::new(), - archive_set: HashSet::new(), - uncles: Vec::new(), - } - } + fn new(state: State) -> ExecutedBlock { ExecutedBlock { base: Default::default(), receipts: Default::default(), transactions_set: Default::default(), state: state } } /// Get a structure containing individual references to all public fields. pub fn fields(&mut self) -> BlockRefMut { BlockRefMut { - header: &self.header, + header: &self.base.header, + transactions: &self.base.transactions, + uncles: &self.base.uncles, state: &mut self.state, - archive: &self.archive, - uncles: &self.uncles, + receipts: &self.receipts, } } } -/// Trait for a object that is_a `Block`. +/// Trait for a object that is_a `ExecutedBlock`. pub trait IsBlock { /// Get the block associated with this object. - fn block(&self) -> &Block; + fn block(&self) -> &ExecutedBlock; /// Get the header associated with this object's block. - fn header(&self) -> &Header { &self.block().header } + fn header(&self) -> &Header { &self.block().base.header } /// Get the final state associated with this object's block. fn state(&self) -> &State { &self.block().state } /// Get all information on transactions in this block. - fn archive(&self) -> &Vec { &self.block().archive } + fn transactions(&self) -> &Vec { &self.block().base.transactions } + + /// Get all information on receipts in this block. + fn receipts(&self) -> &Vec { &self.block().receipts } /// Get all uncles in this block. - fn uncles(&self) -> &Vec
{ &self.block().uncles } + fn uncles(&self) -> &Vec
{ &self.block().base.uncles } } -impl IsBlock for Block { - fn block(&self) -> &Block { self } +impl IsBlock for ExecutedBlock { + fn block(&self) -> &ExecutedBlock { self } } /// Block that is ready for transactions to be added. @@ -86,7 +130,7 @@ impl IsBlock for Block { /// It's a bit like a Vec, eccept that whenever a transaction is pushed, we execute it and /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. pub struct OpenBlock<'x, 'y> { - block: Block, + block: ExecutedBlock, engine: &'x Engine, last_hashes: &'y LastHashes, } @@ -104,7 +148,7 @@ pub struct ClosedBlock<'x, 'y> { /// /// The block's header has valid seal arguments. The block cannot be reversed into a ClosedBlock or OpenBlock. pub struct SealedBlock { - block: Block, + block: ExecutedBlock, uncle_bytes: Bytes, } @@ -112,42 +156,42 @@ impl<'x, 'y> OpenBlock<'x, 'y> { /// Create a new OpenBlock ready for transaction pushing. pub fn new<'a, 'b>(engine: &'a Engine, db: JournalDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { let mut r = OpenBlock { - block: Block::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), + block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, last_hashes: last_hashes, }; - r.block.header.set_number(parent.number() + 1); - r.block.header.set_author(author); - r.block.header.set_extra_data(extra_data); - r.block.header.set_timestamp_now(); + r.block.base.header.set_number(parent.number() + 1); + r.block.base.header.set_author(author); + r.block.base.header.set_extra_data(extra_data); + r.block.base.header.set_timestamp_now(); - engine.populate_from_parent(&mut r.block.header, parent); + engine.populate_from_parent(&mut r.block.base.header, parent); engine.on_new_block(&mut r.block); r } /// Alter the author for the block. - pub fn set_author(&mut self, author: Address) { self.block.header.set_author(author); } + pub fn set_author(&mut self, author: Address) { self.block.base.header.set_author(author); } /// Alter the timestamp of the block. - pub fn set_timestamp(&mut self, timestamp: u64) { self.block.header.set_timestamp(timestamp); } + pub fn set_timestamp(&mut self, timestamp: u64) { self.block.base.header.set_timestamp(timestamp); } /// Alter the difficulty for the block. - pub fn set_difficulty(&mut self, a: U256) { self.block.header.set_difficulty(a); } + pub fn set_difficulty(&mut self, a: U256) { self.block.base.header.set_difficulty(a); } /// Alter the gas limit for the block. - pub fn set_gas_limit(&mut self, a: U256) { self.block.header.set_gas_limit(a); } + pub fn set_gas_limit(&mut self, a: U256) { self.block.base.header.set_gas_limit(a); } /// Alter the gas limit for the block. - pub fn set_gas_used(&mut self, a: U256) { self.block.header.set_gas_used(a); } + pub fn set_gas_used(&mut self, a: U256) { self.block.base.header.set_gas_used(a); } /// Alter the extra_data for the block. 
pub fn set_extra_data(&mut self, extra_data: Bytes) -> Result<(), BlockError> { if extra_data.len() > self.engine.maximum_extra_data_size() { Err(BlockError::ExtraDataOutOfBounds(OutOfBounds{min: None, max: Some(self.engine.maximum_extra_data_size()), found: extra_data.len()})) } else { - self.block.header.set_extra_data(extra_data); + self.block.base.header.set_extra_data(extra_data); Ok(()) } } @@ -157,12 +201,12 @@ impl<'x, 'y> OpenBlock<'x, 'y> { /// NOTE Will check chain constraints and the uncle number but will NOT check /// that the header itself is actually valid. pub fn push_uncle(&mut self, valid_uncle_header: Header) -> Result<(), BlockError> { - if self.block.uncles.len() >= self.engine.maximum_uncle_count() { - return Err(BlockError::TooManyUncles(OutOfBounds{min: None, max: Some(self.engine.maximum_uncle_count()), found: self.block.uncles.len()})); + if self.block.base.uncles.len() >= self.engine.maximum_uncle_count() { + return Err(BlockError::TooManyUncles(OutOfBounds{min: None, max: Some(self.engine.maximum_uncle_count()), found: self.block.base.uncles.len()})); } // TODO: check number // TODO: check not a direct ancestor (use last_hashes for that) - self.block.uncles.push(valid_uncle_header); + self.block.base.uncles.push(valid_uncle_header); Ok(()) } @@ -170,13 +214,13 @@ impl<'x, 'y> OpenBlock<'x, 'y> { pub fn env_info(&self) -> EnvInfo { // TODO: memoise. EnvInfo { - number: self.block.header.number, - author: self.block.header.author.clone(), - timestamp: self.block.header.timestamp, - difficulty: self.block.header.difficulty.clone(), + number: self.block.base.header.number, + author: self.block.base.header.author.clone(), + timestamp: self.block.base.header.timestamp, + difficulty: self.block.base.header.difficulty.clone(), last_hashes: self.last_hashes.clone(), // TODO: should be a reference. 
- gas_used: self.block.archive.last().map_or(U256::zero(), |t| t.receipt.gas_used), - gas_limit: self.block.header.gas_limit.clone(), + gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used), + gas_limit: self.block.base.header.gas_limit.clone(), } } @@ -188,9 +232,10 @@ impl<'x, 'y> OpenBlock<'x, 'y> { // info!("env_info says gas_used={}", env_info.gas_used); match self.block.state.apply(&env_info, self.engine, &t) { Ok(receipt) => { - self.block.archive_set.insert(h.unwrap_or_else(||t.hash())); - self.block.archive.push(Entry { transaction: t, receipt: receipt }); - Ok(&self.block.archive.last().unwrap().receipt) + self.block.transactions_set.insert(h.unwrap_or_else(||t.hash())); + self.block.base.transactions.push(t); + self.block.receipts.push(receipt); + Ok(&self.block.receipts.last().unwrap()) } Err(x) => Err(From::from(x)) } @@ -200,25 +245,25 @@ impl<'x, 'y> OpenBlock<'x, 'y> { pub fn close(self) -> ClosedBlock<'x, 'y> { let mut s = self; s.engine.on_close_block(&mut s.block); - s.block.header.transactions_root = ordered_trie_root(s.block.archive.iter().map(|ref e| e.transaction.rlp_bytes()).collect()); - let uncle_bytes = s.block.uncles.iter().fold(RlpStream::new_list(s.block.uncles.len()), |mut s, u| {s.append(&u.rlp(Seal::With)); s} ).out(); - s.block.header.uncles_hash = uncle_bytes.sha3(); - s.block.header.state_root = s.block.state.root().clone(); - s.block.header.receipts_root = ordered_trie_root(s.block.archive.iter().map(|ref e| e.receipt.rlp_bytes()).collect()); - s.block.header.log_bloom = s.block.archive.iter().fold(LogBloom::zero(), |mut b, e| {b |= &e.receipt.log_bloom; b}); - s.block.header.gas_used = s.block.archive.last().map_or(U256::zero(), |t| t.receipt.gas_used); - s.block.header.note_dirty(); + s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes()).collect()); + let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append(&u.rlp(Seal::With)); s} ).out(); + s.block.base.header.uncles_hash = uncle_bytes.sha3(); + s.block.base.header.state_root = s.block.state.root().clone(); + s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|ref r| r.rlp_bytes()).collect()); + s.block.base.header.log_bloom = s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b |= &r.log_bloom; b}); + s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used); + s.block.base.header.note_dirty(); ClosedBlock::new(s, uncle_bytes) } } impl<'x, 'y> IsBlock for OpenBlock<'x, 'y> { - fn block(&self) -> &Block { &self.block } + fn block(&self) -> &ExecutedBlock { &self.block } } impl<'x, 'y> IsBlock for ClosedBlock<'x, 'y> { - fn block(&self) -> &Block { &self.open_block.block } + fn block(&self) -> &ExecutedBlock { &self.open_block.block } } impl<'x, 'y> ClosedBlock<'x, 'y> { @@ -240,7 +285,7 @@ impl<'x, 'y> ClosedBlock<'x, 'y> { if seal.len() != s.open_block.engine.seal_fields() { return Err(BlockError::InvalidSealArity(Mismatch{expected: s.open_block.engine.seal_fields(), found: seal.len()})); } - s.open_block.block.header.set_seal(seal); + s.open_block.block.base.header.set_seal(seal); Ok(SealedBlock { block: s.open_block.block, uncle_bytes: s.uncle_bytes }) } @@ -255,9 +300,9 @@ impl SealedBlock { /// Get the RLP-encoding of the block. 
pub fn rlp_bytes(&self) -> Bytes { let mut block_rlp = RlpStream::new_list(3); - self.block.header.stream_rlp(&mut block_rlp, Seal::With); - block_rlp.append_list(self.block.archive.len()); - for e in &self.block.archive { e.transaction.rlp_append(&mut block_rlp); } + self.block.base.header.stream_rlp(&mut block_rlp, Seal::With); + block_rlp.append_list(self.block.receipts.len()); + for t in &self.block.base.transactions { t.rlp_append(&mut block_rlp); } block_rlp.append_raw(&self.uncle_bytes, 1); block_rlp.out() } @@ -267,7 +312,7 @@ impl SealedBlock { } impl IsBlock for SealedBlock { - fn block(&self) -> &Block { &self.block } + fn block(&self) -> &ExecutedBlock { &self.block } } /// Enact the block given by block header, transactions and uncles diff --git a/src/engine.rs b/src/engine.rs index d94797290..1fb6ef0ca 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -1,5 +1,5 @@ use common::*; -use block::Block; +use block::ExecutedBlock; use spec::Spec; use evm::Schedule; use evm::Factory; @@ -37,9 +37,9 @@ pub trait Engine : Sync + Send { fn account_start_nonce(&self) -> U256 { decode(&self.spec().engine_params.get("accountStartNonce").unwrap()) } /// Block transformation functions, before and after the transactions. - fn on_new_block(&self, _block: &mut Block) {} + fn on_new_block(&self, _block: &mut ExecutedBlock) {} /// TODO [Gav Wood] Please document me - fn on_close_block(&self, _block: &mut Block) {} + fn on_close_block(&self, _block: &mut ExecutedBlock) {} // TODO: consider including State in the params for verification functions. /// Phase 1 quick block verification. Only does checks that are cheap. `block` (the header's full block) diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index 057a67d20..a677a86cc 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -83,7 +83,7 @@ impl Engine for Ethash { /// Apply the block reward on finalisation of the block. /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). - fn on_close_block(&self, block: &mut Block) { + fn on_close_block(&self, block: &mut ExecutedBlock) { let reward = self.spec().engine_params.get("blockReward").map_or(U256::from(0u64), |a| decode(&a)); let fields = block.fields(); diff --git a/src/header.rs b/src/header.rs index 68526d8c6..a57d4166d 100644 --- a/src/header.rs +++ b/src/header.rs @@ -11,7 +11,7 @@ pub type BlockNumber = u64; /// which is non-specific. /// /// Doesn't do all that much on its own. -#[derive(Debug, Clone)] +#[derive(Default, Debug, Clone)] pub struct Header { // TODO: make all private. /// TODO [Gav Wood] Please document me diff --git a/src/log_entry.rs b/src/log_entry.rs index a791b38a6..be39a72f2 100644 --- a/src/log_entry.rs +++ b/src/log_entry.rs @@ -2,7 +2,7 @@ use util::*; use basic_types::LogBloom; /// A single log's entry. -#[derive(Debug,PartialEq,Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct LogEntry { /// TODO [Gav Wood] Please document me pub address: Address, diff --git a/src/receipt.rs b/src/receipt.rs index 403915fdc..4389de962 100644 --- a/src/receipt.rs +++ b/src/receipt.rs @@ -3,7 +3,7 @@ use basic_types::LogBloom; use log_entry::LogEntry; /// Information describing execution of a transaction. 
-#[derive(Debug)] +#[derive(Default, Debug, Clone)] pub struct Receipt { /// TODO [Gav Wood] Please document me pub state_root: H256, diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 8be1b1c92..ec899c73b 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -2,6 +2,7 @@ use std::env; use super::test_common::*; use client::{BlockChainClient,Client}; use pod_state::*; +use block::Block; use ethereum; fn do_json_test(json_data: &[u8]) -> Vec { @@ -31,20 +32,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { dir.push(H32::random().hex()); { let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); - for b in blocks.into_iter() { - { - let urlp = UntrustedRlp::new(&b); - if !urlp.is_list() || urlp.item_count() != 3 || urlp.size() != b.len() { continue; } - if urlp.val_at::
<Header>(0).is_err() { continue; } - if !urlp.at(1).unwrap().is_list() { continue; } - if urlp.at(1).unwrap().iter().find(|i| i.as_val::<Transaction>().is_err()).is_some() { - continue; - } - if !urlp.at(2).unwrap().is_list() { continue; } - if urlp.at(2).unwrap().iter().find(|i| i.as_val::<Header>
().is_err()).is_some() { - continue; - } - } + for b in blocks.into_iter().filter(|ref b| Block::is_good(b)) { client.import_block(b).unwrap(); } client.flush_queue(); diff --git a/src/transaction.rs b/src/transaction.rs index 47b8fa91f..cacdf2a25 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -3,7 +3,7 @@ use basic_types::*; use error::*; use evm::Schedule; -#[derive(Debug,Clone)] +#[derive(Debug, Clone)] /// TODO [Gav Wood] Please document me pub enum Action { /// TODO [Gav Wood] Please document me @@ -12,9 +12,13 @@ pub enum Action { Call(Address), } +impl Default for Action { + fn default() -> Action { Action::Create } +} + /// A set of information describing an externally-originating message call /// or contract creation operation. -#[derive(Debug,Clone)] +#[derive(Default, Debug, Clone)] pub struct Transaction { /// TODO [debris] Please document me pub nonce: U256, diff --git a/util/src/bytes.rs b/util/src/bytes.rs index a30581a1f..3144dd482 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -273,7 +273,9 @@ pub enum FromBytesError { /// TODO [debris] Please document me DataIsTooShort, /// TODO [debris] Please document me - DataIsTooLong + DataIsTooLong, + /// Integer-representation is non-canonically prefixed with zero byte(s). + ZeroPrefixedInt, } impl StdError for FromBytesError { @@ -310,6 +312,9 @@ macro_rules! impl_uint_from_bytes { match bytes.len() { 0 => Ok(0), l if l <= mem::size_of::<$to>() => { + if bytes[0] == 0 { + return Err(FromBytesError::ZeroPrefixedInt) + } let mut res = 0 as $to; for i in 0..l { let shift = (l - 1 - i) * 8; @@ -344,7 +349,9 @@ macro_rules! impl_uint_from_bytes { ($name: ident) => { impl FromBytes for $name { fn from_bytes(bytes: &[u8]) -> FromBytesResult<$name> { - if bytes.len() <= $name::SIZE { + if !bytes.is_empty() && bytes[0] == 0 { + Err(FromBytesError::ZeroPrefixedInt) + } else if bytes.len() <= $name::SIZE { Ok($name::from(bytes)) } else { Err(FromBytesError::DataIsTooLong) diff --git a/util/src/rlp/rlperrors.rs b/util/src/rlp/rlperrors.rs index 97adbced1..8946098bd 100644 --- a/util/src/rlp/rlperrors.rs +++ b/util/src/rlp/rlperrors.rs @@ -7,6 +7,8 @@ use bytes::FromBytesError; pub enum DecoderError { /// TODO [debris] Please document me FromBytesError(FromBytesError), + /// Given data has additional bytes at the end of the valid RLP fragment. + RlpIsTooBig, /// TODO [debris] Please document me RlpIsTooShort, /// TODO [debris] Please document me diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 768d058c1..d7921a25b 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -46,6 +46,9 @@ impl PayloadInfo { value_len: value_len, } } + + /// Total size of the RLP. + pub fn total(&self) -> usize { self.header_len + self.value_len } } /// Data-oriented view onto rlp-slice. 
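For context on the ZeroPrefixedInt variant introduced above: RLP represents unsigned integers as minimal big-endian byte strings, so a decoder that tolerated leading zero bytes would let two distinct encodings decode to the same value. The standalone Rust sketch below shows only that rule in isolation; the function name, error strings, and u64 cap are invented for the example and are not part of these patches.

fn decode_canonical_u64(bytes: &[u8]) -> Result<u64, &'static str> {
    // More than 8 bytes cannot fit in a u64.
    if bytes.len() > 8 {
        return Err("data is too long");
    }
    // A canonical big-endian integer never starts with a zero byte;
    // the empty string encodes zero.
    if !bytes.is_empty() && bytes[0] == 0 {
        return Err("zero-prefixed int");
    }
    Ok(bytes.iter().fold(0u64, |acc, &b| (acc << 8) | u64::from(b)))
}

fn main() {
    assert_eq!(decode_canonical_u64(&[]), Ok(0));
    assert_eq!(decode_canonical_u64(&[0x04, 0x00]), Ok(1024));
    // Same value, non-canonical encoding: rejected.
    assert!(decode_canonical_u64(&[0x00, 0x04, 0x00]).is_err());
}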
From 2f42e0eda0af6207d3d7d59b007af02256d88806 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 26 Jan 2016 19:24:33 +0100 Subject: [PATCH 094/138] parity on netstats --- rpc/Cargo.toml | 1 - rpc/src/impls/eth.rs | 20 ++++++++++++++------ rpc/src/lib.rs | 3 ++- rpc/src/traits/eth.rs | 1 + rpc/src/types/block.rs | 6 +++--- rpc/src/types/mod.rs | 17 +++++++++++++++++ util/Cargo.toml | 1 + util/src/hash.rs | 37 +++++++++++++++++++++++++++++++++++++ util/src/lib.rs | 1 + util/src/uint.rs | 13 +++++++++++++ 10 files changed, 89 insertions(+), 11 deletions(-) diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index d0f9f50e8..ccdd46679 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -8,7 +8,6 @@ authors = ["Marek Kotewicz , @@ -27,7 +28,7 @@ impl Eth for EthClient { fn author(&self, params: Params) -> Result { match params { - Params::None => Ok(Value::String(Address::new().to_hex())), + Params::None => Ok(as_value(&Address::new())), _ => Err(Error::invalid_params()) } } @@ -64,8 +65,15 @@ impl Eth for EthClient { Ok(Value::U64(0)) } - fn block(&self, _: Params) -> Result { - Ok(Value::Null) + fn block(&self, params: Params) -> Result { + if let Params::Array(ref arr) = params { + if let [ref h, Value::Bool(ref include_transactions)] = arr as &[Value] { + if let Ok(hash) = from_value::(h.clone()) { + return Ok(as_value(&Block::default())) + } + } + } + Err(Error::invalid_params()) } } @@ -91,6 +99,6 @@ impl EthFilter for EthFilterClient { } fn filter_changes(&self, _: Params) -> Result { - Ok(Value::Array(vec![Value::String(self.client.chain_info().best_block_hash.to_hex())])) + Ok(Value::Array(vec![as_value(&self.client.chain_info().best_block_hash)])) } } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 148e9f134..43a24a1fb 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -1,8 +1,9 @@ #![feature(custom_derive, custom_attribute, plugin)] +#![feature(slice_patterns)] #![plugin(serde_macros)] -extern crate rustc_serialize; extern crate serde; +extern crate serde_json; extern crate jsonrpc_core; extern crate jsonrpc_http_server; extern crate ethcore_util as util; diff --git a/rpc/src/traits/eth.rs b/rpc/src/traits/eth.rs index 31e9df164..35b59a91c 100644 --- a/rpc/src/traits/eth.rs +++ b/rpc/src/traits/eth.rs @@ -35,6 +35,7 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_coinbase", Eth::author); delegate.add_method("eth_gasPrice", Eth::gas_price); delegate.add_method("eth_blockNumber", Eth::block_number); + delegate.add_method("eth_getBlockByHash", Eth::block); delegate.add_method("eth_getBlockByNumber", Eth::block); delegate.add_method("eth_mining", Eth::is_mining); delegate.add_method("eth_hashrate", Eth::hashrate); diff --git a/rpc/src/types/block.rs b/rpc/src/types/block.rs index c15c8186d..7b23e2e0c 100644 --- a/rpc/src/types/block.rs +++ b/rpc/src/types/block.rs @@ -1,7 +1,7 @@ use util::hash::*; use util::uint::*; -#[derive(Default)] +#[derive(Default, Serialize)] pub struct Block { hash: H256, #[serde(rename="parentHash")] @@ -35,7 +35,7 @@ fn test_block_serialize() { use serde_json; let block = Block::default(); - //let serialized = serde_json::to_string(&block).unwrap(); - //println!("s: {:?}", serialized); + let serialized = serde_json::to_string(&block).unwrap(); + println!("s: {:?}", serialized); //assert!(false); } diff --git a/rpc/src/types/mod.rs b/rpc/src/types/mod.rs index fc9210db1..a2f1da63e 100644 --- a/rpc/src/types/mod.rs +++ b/rpc/src/types/mod.rs @@ -1 +1,18 @@ +use serde::{Serialize, Deserialize, de}; +use serde_json::value::{Value, 
Serializer, Deserializer}; + mod block; + +pub fn as_value(s: &S) -> Value where S: Serialize { + let mut serializer = Serializer::new(); + // should never panic! + s.serialize(&mut serializer).unwrap(); + serializer.unwrap() +} + +pub fn from_value(value: Value) -> Result::Error> where D: Deserialize { + let mut deserialier = Deserializer::new(value); + Deserialize::deserialize(&mut deserialier) +} + +pub use self::block::Block; diff --git a/util/Cargo.toml b/util/Cargo.toml index a91bff962..362db33b2 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -25,6 +25,7 @@ itertools = "0.4" crossbeam = "0.2" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } +serde = "0.6.7" clippy = "*" # Always newest, since we use nightly [dev-dependencies] diff --git a/util/src/hash.rs b/util/src/hash.rs index 0e4139f3c..011746028 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -8,6 +8,8 @@ use rand::os::OsRng; use bytes::{BytesConvertable,Populatable}; use from_json::*; use uint::{Uint, U256}; +use rustc_serialize::hex::ToHex; +use serde; /// Trait for a fixed-size byte array to be used as the output of hash functions. /// @@ -205,6 +207,41 @@ macro_rules! impl_hash { } } + impl serde::Serialize for $from { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: serde::Serializer { + let mut hex = "0x".to_owned(); + hex.push_str(self.to_hex().as_ref()); + serializer.visit_str(hex.as_ref()) + } + } + + impl serde::Deserialize for $from { + fn deserialize(deserializer: &mut D) -> Result<$from, D::Error> + where D: serde::Deserializer { + struct HashVisitor; + + impl serde::de::Visitor for HashVisitor { + type Value = $from; + + fn visit_str(&mut self, value: &str) -> Result where E: serde::Error { + // 0x + len + if value.len() != 2 + $size * 2 { + return Err(serde::Error::syntax("Invalid length.")); + } + + value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::syntax("Invalid valid hex.")) + } + + fn visit_string(&mut self, value: String) -> Result where E: serde::Error { + self.visit_str(value.as_ref()) + } + } + + deserializer.visit(HashVisitor) + } + } + impl FromJson for $from { fn from_json(json: &Json) -> Self { match *json { diff --git a/util/src/lib.rs b/util/src/lib.rs index 970c0713c..b1b93968c 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -55,6 +55,7 @@ extern crate secp256k1; extern crate arrayvec; extern crate elastic_array; extern crate crossbeam; +extern crate serde; /// TODO [Gav Wood] Please document me pub mod standard; diff --git a/util/src/uint.rs b/util/src/uint.rs index ab136d7c6..de05ca4a8 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -23,6 +23,8 @@ use standard::*; use from_json::*; +use rustc_serialize::hex::ToHex; +use serde; macro_rules! impl_map_from { ($thing:ident, $from:ty, $to:ty) => { @@ -436,6 +438,17 @@ macro_rules! 
construct_uint { } } + impl serde::Serialize for $name { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: serde::Serializer { + let mut hex = "0x".to_owned(); + let mut bytes = [0u8; 8 * $n_words]; + self.to_bytes(&mut bytes); + hex.push_str(bytes.to_hex().as_ref()); + serializer.visit_str(hex.as_ref()) + } + } + impl From for $name { fn from(value: u64) -> $name { let mut ret = [0; $n_words]; From 4e62b5201087eb5c191ddbf1eaf581f12a3d4b96 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 04:24:32 +0400 Subject: [PATCH 095/138] blockchain cache tests --- src/blockchain.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/blockchain.rs b/src/blockchain.rs index 3bd31688a..16e75b08d 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -628,6 +628,25 @@ mod tests { use util::hash::*; use blockchain::*; + fn get_populated_bc() -> BlockChain { + let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a059262c330941f3fe2a34d16d6e3c7b30d2ceb37c6a0e9a994c494ee1a61d2410885aa4c8bf8e56e264c0c0".from_hex().unwrap(); + let b1 = "f90261f901f9a05716670833ec874362d65fea27a7cd35af5897d275b31a44944113111e4e96d2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb52de543653d86ccd13ba3ddf8b052525b04231c6884a4db3188a184681d878a0e78628dd45a1f8dc495594d83b76c588a3ee67463260f8b7d4a42f574aeab29aa0e9244cf7503b79c03d3a099e07a80d2dbc77bb0b502d8a89d51ac0d68dd31313b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd882520884562791e580a051b3ecba4e3f2b49c11d42dd0851ec514b1be3138080f72a2b6e83868275d98f8877671f479c414b47f862f86080018304cb2f94095e7baea6a6c7c4c2dfeb977efac326af552d870a801ca09e2709d7ec9bbe6b1bbbf0b2088828d14cd5e8642a1fee22dc74bfa89761a7f9a04bd8813dee4be989accdb708b1c2e325a7e9c695a8024e30e89d6c644e424747c0".from_hex().unwrap(); + let b2 = 
"f902ccf901f9a0437e51676ff10756fcfee5edd9159fa41dbcb1b2c592850450371cbecd54ee4fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c70a5dc56146e5ef025e4e5726a6373c6f12fd2f6784093a19ead0a7d17fb292a040645cbce4fd399e7bb9160b4c30c40d7ee616a030d4e18ef0ed3b02bdb65911a086e608555f63628417032a011d107b36427af37d153f0da02ce3f90fdd5e8c08b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd882c0e384562791e880a0e3cc39ff775cc0a32f175995b92e84b729e5c9a3563ff899e3555b908bc21d75887c3cde283f4846a6f8cdf8cb01018304cb2f8080b87e6060604052606e8060106000396000f360606040526000357c010000000000000000000000000000000000000000000000000000000090048063c0406226146037576035565b005b60406004506056565b6040518082815260200191505060405180910390f35b6000600560006000508190555060059050606b565b90561ba05258615c63503c0a600d6994b12ea5750d45b3c69668e2a371b4fbfb9eeff6b8a0a11be762bc90491231274a2945be35a43f23c27775b1ff24dd521702fe15f73ec0".from_hex().unwrap(); + let b3a = "f90261f901f9a036fde1253128666fcb95a5956da14a73489e988bb72738717ec1d31e1cee781aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a09dc4b1357c0b7b8108f8a098f4f9a1a274957bc9ebc22a9ae67ae81739e5b19ca007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882524d84562791eb80a074861666bd346c025889745c793b91ab9cd1e2ca19b5cf3c50d04d135b0a4d2b8809fe9587ea4cdc04f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba06fd84874d36d5de9e8e48978c03619b53a96b7ae0a4cd1ac118f103098b44801a00572596974dd7df4f9f69bd7456585618c568d8434ef6453391b89281ce12ae1c0".from_hex().unwrap(); + let b3b = 
"f90265f901f9a036fde1253128666fcb95a5956da14a73489e988bb72738717ec1d31e1cee781aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ab87dc338bfd6f662b1cd90bc0c9e40a1b2146a095312393c9e13ce3a5008b09a0e609b7a7d4b8a2403ec1268627ecd98783627246e8f1b26addb3ff504f76a054a0592fabf92476512952db3a69a2481a42912e668a1ee28c4c322e703bb665f8beb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882a1f084562791ee80a0fe7098fa7e4ac5d637eea81fb23f8f78346826dbab430068dd9a249d0afa99818853e1a6b201ae3545f866f86402018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d0284c04062261ca06edc9ce8e7da4cc34067beb325dcad59e5655a164a5100a50bc3eb681b12c716a0abf9053d5de65b1be81fe50d327b84de685efbeecea34e7b747180a6c6023e44c0".from_hex().unwrap(); + + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + + let bc = BlockChain::new(&genesis, &dir); + bc.insert_block(&b1); + bc.insert_block(&b2); + bc.insert_block(&b3a); + bc.insert_block(&b3b); + + bc + } + #[test] fn valid_tests_extra32() { let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0925002c3260b44e44c3edebad1cc442142b03020209df1ab8bb86752edbd2cd7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0363659b251bf8b819179874c8cce7b9b983d7f3704cbb58a3b334431f7032871889032d09c281e1236c0c0".from_hex().unwrap(); @@ -773,4 +792,11 @@ mod tests { assert_eq!(bc.best_block_hash(), b1_hash); } } + + #[test] + fn blocks_get_garbage_collected() { + let bc = get_populated_bc(); + bc.squeeze_to_fit(CacheSize { blocks: 0, block_logs: 0, transaction_addresses: 0, block_details: 0, blocks_blooms: 0 } ); + assert_eq!(bc.cache_size().blocks, 0); + } } From 8d8782056a3e5d4166f55c1f007767ca26e9f1b5 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 13:22:51 +0400 Subject: [PATCH 096/138] failing test for sqeeze to fit code --- src/blockchain.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/blockchain.rs b/src/blockchain.rs index dcc348440..3f55f3ea1 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -803,5 +803,8 @@ mod tests { let bc = get_populated_bc(); bc.squeeze_to_fit(CacheSize { blocks: 0, block_logs: 0, transaction_addresses: 0, block_details: 0, blocks_blooms: 0 } ); assert_eq!(bc.cache_size().blocks, 0); + 
assert_eq!(bc.cache_size().block_details, 0); + assert_eq!(bc.cache_size().block_logs, 0); + assert_eq!(bc.cache_size().blocks_blooms, 0); } } From b4263c1755a63d55f2cd60e3484458728f778f02 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 13:38:47 +0400 Subject: [PATCH 097/138] squeeze/garbage --- src/blockchain.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index 3f55f3ea1..40abb1c8d 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -799,12 +799,20 @@ mod tests { } #[test] - fn blocks_get_garbage_collected() { + fn can_be_squeezed() { let bc = get_populated_bc(); bc.squeeze_to_fit(CacheSize { blocks: 0, block_logs: 0, transaction_addresses: 0, block_details: 0, blocks_blooms: 0 } ); - assert_eq!(bc.cache_size().blocks, 0); - assert_eq!(bc.cache_size().block_details, 0); + assert_eq!(bc.cache_size().blocks, 1624); + assert_eq!(bc.cache_size().block_details, 3712); assert_eq!(bc.cache_size().block_logs, 0); assert_eq!(bc.cache_size().blocks_blooms, 0); } + + #[test] + fn can_collect_garbage() { + let bc = get_populated_bc(); + bc.collect_garbage(false); + assert_eq!(bc.cache_size().blocks, 1624); + + } } From d2229f519ddfd8ec0cfdcd69b6f2fe9f73647dc5 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 14:50:39 +0400 Subject: [PATCH 098/138] test helpers initial --- src/tests/helpers.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/tests/helpers.rs diff --git a/src/tests/helpers.rs b/src/tests/helpers.rs new file mode 100644 index 000000000..e69de29bb From dddbf589451a61daeecd122d4e83e6ae3b9e3165 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 14:50:48 +0400 Subject: [PATCH 099/138] test helpers initial --- src/tests/client.rs | 23 +++++-------------- src/tests/helpers.rs | 54 ++++++++++++++++++++++++++++++++++++++++++++ src/tests/mod.rs | 1 + 3 files changed, 61 insertions(+), 17 deletions(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index eb58699b4..57ab6e577 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -3,18 +3,7 @@ use std::env; use super::test_common::*; use std::path::PathBuf; use spec::*; - -#[cfg(test)] -fn get_random_temp_dir() -> PathBuf { - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - dir -} - -#[cfg(test)] -fn get_test_spec() -> Spec { - Spec::new_test() -} +use super::helpers::*; #[cfg(test)] fn get_good_dummy_block() -> Bytes { @@ -57,7 +46,7 @@ fn create_test_block(header: &Header) -> Bytes { #[cfg(test)] fn get_test_client_with_blocks(blocks: Vec) -> Arc { - let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); for block in &blocks { if let Err(_) = client.import_block(block.clone()) { panic!("panic importing block which is well-formed"); @@ -71,20 +60,20 @@ fn get_test_client_with_blocks(blocks: Vec) -> Arc { #[test] fn created() { - let client_result = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()); + let client_result = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()); assert!(client_result.is_ok()); } #[test] fn imports_from_empty() { - let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); 
client.import_verified_blocks(&IoChannel::disconnected()); client.flush_queue(); } #[test] fn imports_good_block() { - let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); let good_block = get_good_dummy_block(); if let Err(_) = client.import_block(good_block) { panic!("error importing block being good by definition"); @@ -98,7 +87,7 @@ fn imports_good_block() { #[test] fn query_none_block() { - let client = Client::new(get_test_spec(), &get_random_temp_dir(), IoChannel::disconnected()).unwrap(); + let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); let non_existant = client.block_header_at(188); assert!(non_existant.is_none()); diff --git a/src/tests/helpers.rs b/src/tests/helpers.rs index e69de29bb..4cbd36692 100644 --- a/src/tests/helpers.rs +++ b/src/tests/helpers.rs @@ -0,0 +1,54 @@ +use client::{BlockChainClient,Client}; +use std::env; +use super::test_common::*; +use std::path::PathBuf; +use spec::*; +use std::fs::{create_dir_all}; + +#[cfg(test)] +const FIXED_TEMP_DIR_NAME: &'static str = "parity-temp"; + + +#[cfg(test)] +pub fn get_tests_temp_dir() -> PathBuf { + let mut dir = env::temp_dir(); + dir.push(FIXED_TEMP_DIR_NAME); + if let Err(_) = create_dir_all(&dir) { + panic!("failed to create test dir!"); + } + dir +} + +#[cfg(test)] +pub fn get_random_path() -> PathBuf { + let mut dir = get_tests_temp_dir(); + dir.push(H32::random().hex()); + dir +} + + +#[cfg(test)] +pub fn get_test_spec() -> Spec { + Spec::new_test() +} + +#[cfg(test)] +pub fn generate_dummy_client(block_number: usize) { + let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + + let mut rolling_hash = test_engine.spec().genesis_header().hash(); + let mut rolling_state = test_engine.spec().genesis_header().state_root; + let mut rolling_block_number = 1; + + for _ in 0..block_number { + let mut header = Header::new(); + + header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap()); + header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap()); + header.timestamp = 40; + header.number = rolling_block_number; + header.parent_hash = test_engine.spec().genesis_header().hash(); + header.state_root = test_engine.spec().genesis_header().state_root; + } + +} \ No newline at end of file diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 290c2e293..17da72d77 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -6,3 +6,4 @@ mod executive; mod state; mod client; mod chain; +mod helpers; \ No newline at end of file From 58651392065f67071991cbb587cf71aa48ca8516 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 12:31:54 +0100 Subject: [PATCH 100/138] block visible on netstats --- rpc/src/impls/eth.rs | 39 +++++++++++++++++++++++++++++++++------ rpc/src/impls/web3.rs | 3 ++- rpc/src/types/block.rs | 33 +++++++++++++++++++-------------- rpc/src/types/mod.rs | 2 +- src/client.rs | 8 ++++++++ src/externalities.rs | 31 ++++++++++--------------------- 6 files changed, 73 insertions(+), 43 deletions(-) diff --git a/rpc/src/impls/eth.rs b/rpc/src/impls/eth.rs index 54372ab3a..f0ad73c57 100644 --- a/rpc/src/impls/eth.rs +++ b/rpc/src/impls/eth.rs @@ -1,10 +1,12 @@ use std::sync::Arc; -use serde_json; use jsonrpc_core::*; use util::hash::*; +use util::uint::*; +use util::sha3::*; use ethcore::client::*; +use 
ethcore::views::*; use traits::{Eth, EthFilter}; -use types::{Block, as_value, from_value}; +use types::{Block, to_value, from_value}; pub struct EthClient { client: Arc, @@ -28,7 +30,7 @@ impl Eth for EthClient { fn author(&self, params: Params) -> Result { match params { - Params::None => Ok(as_value(&Address::new())), + Params::None => Ok(to_value(&Address::new())), _ => Err(Error::invalid_params()) } } @@ -67,9 +69,34 @@ impl Eth for EthClient { fn block(&self, params: Params) -> Result { if let Params::Array(ref arr) = params { - if let [ref h, Value::Bool(ref include_transactions)] = arr as &[Value] { + if let [ref h, Value::Bool(ref _include_txs)] = arr as &[Value] { if let Ok(hash) = from_value::(h.clone()) { - return Ok(as_value(&Block::default())) + return match (self.client.block_header(&hash), self.client.block_details(&hash)) { + (Some(bytes), Some(details)) => { + let view = HeaderView::new(&bytes); + let block = Block { + hash: view.sha3(), + parent_hash: view.parent_hash(), + uncles_hash: view.uncles_hash(), + author: view.author(), + miner: view.author(), + state_root: view.state_root(), + transactions_root: view.transactions_root(), + receipts_root: view.receipts_root(), + number: U256::from(view.number()), + gas_used: view.gas_used(), + gas_limit: view.gas_limit(), + logs_bloom: view.log_bloom(), + timestamp: U256::from(view.timestamp()), + difficulty: view.difficulty(), + total_difficulty: details.total_difficulty, + uncles: vec![], + transactions: vec![] + }; + Ok(to_value(&block)) + }, + _ => Ok(Value::Null), + } } } } @@ -99,6 +126,6 @@ impl EthFilter for EthFilterClient { } fn filter_changes(&self, _: Params) -> Result { - Ok(Value::Array(vec![as_value(&self.client.chain_info().best_block_hash)])) + Ok(Value::Array(vec![to_value(&self.client.chain_info().best_block_hash)])) } } diff --git a/rpc/src/impls/web3.rs b/rpc/src/impls/web3.rs index 58e7858eb..50eb9c6f5 100644 --- a/rpc/src/impls/web3.rs +++ b/rpc/src/impls/web3.rs @@ -10,7 +10,8 @@ impl Web3Client { impl Web3 for Web3Client { fn client_version(&self, params: Params) -> Result { match params { - Params::None => Ok(Value::String("parity/0.1.0/-/rust1.7-nightly".to_string())), + //Params::None => Ok(Value::String("parity/0.1.0/-/rust1.7-nightly".to_owned())), + Params::None => Ok(Value::String("surprise/0.1.0/surprise/surprise".to_owned())), _ => Err(Error::invalid_params()) } } diff --git a/rpc/src/types/block.rs b/rpc/src/types/block.rs index 7b23e2e0c..ac1e673a5 100644 --- a/rpc/src/types/block.rs +++ b/rpc/src/types/block.rs @@ -1,33 +1,38 @@ use util::hash::*; use util::uint::*; -#[derive(Default, Serialize)] +#[derive(Default, Debug, Serialize)] pub struct Block { - hash: H256, + pub hash: H256, #[serde(rename="parentHash")] - parent_hash: H256, + pub parent_hash: H256, #[serde(rename="sha3Uncles")] - uncles_hash: H256, - author: Address, + pub uncles_hash: H256, + pub author: Address, // TODO: get rid of this one - miner: Address, + pub miner: Address, #[serde(rename="stateRoot")] - state_root: H256, + pub state_root: H256, #[serde(rename="transactionsRoot")] - transactions_root: H256, + pub transactions_root: H256, #[serde(rename="receiptsRoot")] - receipts_root: H256, - number: u64, + pub receipts_root: H256, + pub number: U256, #[serde(rename="gasUsed")] - gas_used: U256, + pub gas_used: U256, #[serde(rename="gasLimit")] - gas_limit: U256, + pub gas_limit: U256, // TODO: figure out how to properly serialize bytes //#[serde(rename="extraData")] //extra_data: Vec, #[serde(rename="logsBloom")] - 
logs_bloom: H2048, - timestamp: u64 + pub logs_bloom: H2048, + pub timestamp: U256, + pub difficulty: U256, + #[serde(rename="totalDifficulty")] + pub total_difficulty: U256, + pub uncles: Vec, + pub transactions: Vec } #[test] diff --git a/rpc/src/types/mod.rs b/rpc/src/types/mod.rs index a2f1da63e..0b7d97916 100644 --- a/rpc/src/types/mod.rs +++ b/rpc/src/types/mod.rs @@ -3,7 +3,7 @@ use serde_json::value::{Value, Serializer, Deserializer}; mod block; -pub fn as_value(s: &S) -> Value where S: Serialize { +pub fn to_value(s: &S) -> Value where S: Serialize { let mut serializer = Serializer::new(); // should never panic! s.serialize(&mut serializer).unwrap(); diff --git a/src/client.rs b/src/client.rs index 2e9029709..e8d8fc8f2 100644 --- a/src/client.rs +++ b/src/client.rs @@ -11,6 +11,7 @@ use service::NetSyncMessage; use env_info::LastHashes; use verification::*; use block::*; +use extras::BlockDetails; /// General block status #[derive(Debug)] @@ -64,6 +65,9 @@ pub trait BlockChainClient : Sync + Send { /// Get block status by block header hash. fn block_status(&self, hash: &H256) -> BlockStatus; + /// Get familial details concerning a block. + fn block_details(&self, hash: &H256) -> Option; + /// Get raw block header data by block number. fn block_header_at(&self, n: BlockNumber) -> Option; @@ -299,6 +303,10 @@ impl BlockChainClient for Client { fn block_status(&self, hash: &H256) -> BlockStatus { if self.chain.read().unwrap().is_known(&hash) { BlockStatus::InChain } else { BlockStatus::Unknown } } + + fn block_details(&self, hash: &H256) -> Option { + self.chain.read().unwrap().block_details(hash) + } fn block_header_at(&self, n: BlockNumber) -> Option { self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_header(&h)) diff --git a/src/externalities.rs b/src/externalities.rs index f9a79c3c0..8b16cc72b 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -19,8 +19,7 @@ pub enum OutputPolicy<'a> { pub struct OriginInfo { address: Address, origin: Address, - gas_price: U256, - value: U256 + gas_price: U256 } impl OriginInfo { @@ -29,11 +28,7 @@ impl OriginInfo { OriginInfo { address: params.address.clone(), origin: params.origin.clone(), - gas_price: params.gas_price.clone(), - value: match params.value { - ActionValue::Transfer(val) => val, - ActionValue::Apparent(val) => val, - } + gas_price: params.gas_price.clone() } } } @@ -116,7 +111,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: ActionValue::Transfer(value.clone()), + value: value.clone(), code: Some(code.to_vec()), data: None, }; @@ -136,29 +131,24 @@ impl<'a> Ext for Externalities<'a> { fn call(&mut self, gas: &U256, - sender_address: &Address, - receive_address: &Address, - value: Option, + address: &Address, + value: &U256, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult { - let mut params = ActionParams { - sender: sender_address.clone(), - address: receive_address.clone(), - value: ActionValue::Apparent(self.origin_info.value.clone()), + let params = ActionParams { code_address: code_address.clone(), + address: address.clone(), + sender: self.origin_info.address.clone(), origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), + value: value.clone(), code: self.state.code(code_address), data: Some(data.to_vec()), }; - if let Some(value) = value { - params.value = ActionValue::Transfer(value); - } - let mut ex = 
Executive::from_parent(self.state, self.env_info, self.engine, self.depth); match ex.call(params, self.substate, BytesRef::Fixed(output)) { @@ -168,10 +158,9 @@ impl<'a> Ext for Externalities<'a> { } fn extcode(&self, address: &Address) -> Bytes { - self.state.code(address).unwrap_or_else(|| vec![]) + self.state.code(address).unwrap_or(vec![]) } - #[allow(match_ref_pats)] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { From 5998c16b17ecc454d4871e8ef0ec9e7010315c90 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 13:08:56 +0100 Subject: [PATCH 101/138] reverted incorrect changes to externalities --- src/externalities.rs | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/externalities.rs b/src/externalities.rs index e3a363fbc..f9a79c3c0 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -19,7 +19,8 @@ pub enum OutputPolicy<'a> { pub struct OriginInfo { address: Address, origin: Address, - gas_price: U256 + gas_price: U256, + value: U256 } impl OriginInfo { @@ -28,7 +29,11 @@ impl OriginInfo { OriginInfo { address: params.address.clone(), origin: params.origin.clone(), - gas_price: params.gas_price.clone() + gas_price: params.gas_price.clone(), + value: match params.value { + ActionValue::Transfer(val) => val, + ActionValue::Apparent(val) => val, + } } } } @@ -111,7 +116,7 @@ impl<'a> Ext for Externalities<'a> { origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: value.clone(), + value: ActionValue::Transfer(value.clone()), code: Some(code.to_vec()), data: None, }; @@ -131,24 +136,29 @@ impl<'a> Ext for Externalities<'a> { fn call(&mut self, gas: &U256, - address: &Address, - value: &U256, + sender_address: &Address, + receive_address: &Address, + value: Option, data: &[u8], code_address: &Address, output: &mut [u8]) -> MessageCallResult { - let params = ActionParams { + let mut params = ActionParams { + sender: sender_address.clone(), + address: receive_address.clone(), + value: ActionValue::Apparent(self.origin_info.value.clone()), code_address: code_address.clone(), - address: address.clone(), - sender: self.origin_info.address.clone(), origin: self.origin_info.origin.clone(), gas: *gas, gas_price: self.origin_info.gas_price.clone(), - value: value.clone(), code: self.state.code(code_address), data: Some(data.to_vec()), }; + if let Some(value) = value { + params.value = ActionValue::Transfer(value); + } + let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth); match ex.call(params, self.substate, BytesRef::Fixed(output)) { @@ -158,9 +168,10 @@ impl<'a> Ext for Externalities<'a> { } fn extcode(&self, address: &Address) -> Bytes { - self.state.code(address).unwrap_or(vec![]) + self.state.code(address).unwrap_or_else(|| vec![]) } + #[allow(match_ref_pats)] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { @@ -204,7 +215,6 @@ impl<'a> Ext for Externalities<'a> { fn suicide(&mut self, refund_address: &Address) { let address = self.origin_info.address.clone(); let balance = self.balance(&address); - trace!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); self.state.transfer_balance(&address, refund_address, &balance); self.substate.suicides.insert(address); } From 996db7cd29db5d641faaf979cd57ce846c0e3273 Mon Sep 
17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 13:11:09 +0100 Subject: [PATCH 102/138] uncommented client tick --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 904a4e02d..50c4e3f81 100644 --- a/src/client.rs +++ b/src/client.rs @@ -294,7 +294,7 @@ impl Client { /// Tick the client. pub fn tick(&self) { - //self.chain.read().unwrap().collect_garbage(false); + self.chain.read().unwrap().collect_garbage(false); } } From 38164d6b21323b78f49ddfb1201fe5084a1bda41 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 16:23:24 +0400 Subject: [PATCH 103/138] extra tests --- src/blockchain.rs | 36 ------------------------------------ src/client.rs | 12 ++++++++++++ src/tests/client.rs | 37 ++++++++++++++++++++++++++----------- src/tests/helpers.rs | 37 ++++++++++++++++++++++++++++++++----- 4 files changed, 70 insertions(+), 52 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index 40abb1c8d..a0b554bcc 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -633,25 +633,6 @@ mod tests { use util::hash::*; use blockchain::*; - fn get_populated_bc() -> BlockChain { - let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a059262c330941f3fe2a34d16d6e3c7b30d2ceb37c6a0e9a994c494ee1a61d2410885aa4c8bf8e56e264c0c0".from_hex().unwrap(); - let b1 = "f90261f901f9a05716670833ec874362d65fea27a7cd35af5897d275b31a44944113111e4e96d2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb52de543653d86ccd13ba3ddf8b052525b04231c6884a4db3188a184681d878a0e78628dd45a1f8dc495594d83b76c588a3ee67463260f8b7d4a42f574aeab29aa0e9244cf7503b79c03d3a099e07a80d2dbc77bb0b502d8a89d51ac0d68dd31313b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd882520884562791e580a051b3ecba4e3f2b49c11d42dd0851ec514b1be3138080f72a2b6e83868275d98f8877671f479c414b47f862f86080018304cb2f94095e7baea6a6c7c4c2dfeb977efac326af552d870a801ca09e2709d7ec9bbe6b1bbbf0b2088828d14cd5e8642a1fee22dc74bfa89761a7f9a04bd8813dee4be989accdb708b1c2e325a7e9c695a8024e30e89d6c644e424747c0".from_hex().unwrap(); - let b2 = 
"f902ccf901f9a0437e51676ff10756fcfee5edd9159fa41dbcb1b2c592850450371cbecd54ee4fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c70a5dc56146e5ef025e4e5726a6373c6f12fd2f6784093a19ead0a7d17fb292a040645cbce4fd399e7bb9160b4c30c40d7ee616a030d4e18ef0ed3b02bdb65911a086e608555f63628417032a011d107b36427af37d153f0da02ce3f90fdd5e8c08b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd882c0e384562791e880a0e3cc39ff775cc0a32f175995b92e84b729e5c9a3563ff899e3555b908bc21d75887c3cde283f4846a6f8cdf8cb01018304cb2f8080b87e6060604052606e8060106000396000f360606040526000357c010000000000000000000000000000000000000000000000000000000090048063c0406226146037576035565b005b60406004506056565b6040518082815260200191505060405180910390f35b6000600560006000508190555060059050606b565b90561ba05258615c63503c0a600d6994b12ea5750d45b3c69668e2a371b4fbfb9eeff6b8a0a11be762bc90491231274a2945be35a43f23c27775b1ff24dd521702fe15f73ec0".from_hex().unwrap(); - let b3a = "f90261f901f9a036fde1253128666fcb95a5956da14a73489e988bb72738717ec1d31e1cee781aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a09dc4b1357c0b7b8108f8a098f4f9a1a274957bc9ebc22a9ae67ae81739e5b19ca007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882524d84562791eb80a074861666bd346c025889745c793b91ab9cd1e2ca19b5cf3c50d04d135b0a4d2b8809fe9587ea4cdc04f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba06fd84874d36d5de9e8e48978c03619b53a96b7ae0a4cd1ac118f103098b44801a00572596974dd7df4f9f69bd7456585618c568d8434ef6453391b89281ce12ae1c0".from_hex().unwrap(); - let b3b = 
"f90265f901f9a036fde1253128666fcb95a5956da14a73489e988bb72738717ec1d31e1cee781aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ab87dc338bfd6f662b1cd90bc0c9e40a1b2146a095312393c9e13ce3a5008b09a0e609b7a7d4b8a2403ec1268627ecd98783627246e8f1b26addb3ff504f76a054a0592fabf92476512952db3a69a2481a42912e668a1ee28c4c322e703bb665f8beb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882a1f084562791ee80a0fe7098fa7e4ac5d637eea81fb23f8f78346826dbab430068dd9a249d0afa99818853e1a6b201ae3545f866f86402018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d0284c04062261ca06edc9ce8e7da4cc34067beb325dcad59e5655a164a5100a50bc3eb681b12c716a0abf9053d5de65b1be81fe50d327b84de685efbeecea34e7b747180a6c6023e44c0".from_hex().unwrap(); - - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - - let bc = BlockChain::new(&genesis, &dir); - bc.insert_block(&b1); - bc.insert_block(&b2); - bc.insert_block(&b3a); - bc.insert_block(&b3b); - - bc - } - #[test] fn valid_tests_extra32() { let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0925002c3260b44e44c3edebad1cc442142b03020209df1ab8bb86752edbd2cd7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0363659b251bf8b819179874c8cce7b9b983d7f3704cbb58a3b334431f7032871889032d09c281e1236c0c0".from_hex().unwrap(); @@ -798,21 +779,4 @@ mod tests { } } - #[test] - fn can_be_squeezed() { - let bc = get_populated_bc(); - bc.squeeze_to_fit(CacheSize { blocks: 0, block_logs: 0, transaction_addresses: 0, block_details: 0, blocks_blooms: 0 } ); - assert_eq!(bc.cache_size().blocks, 1624); - assert_eq!(bc.cache_size().block_details, 3712); - assert_eq!(bc.cache_size().block_logs, 0); - assert_eq!(bc.cache_size().blocks_blooms, 0); - } - - #[test] - fn can_collect_garbage() { - let bc = get_populated_bc(); - bc.collect_garbage(false); - assert_eq!(bc.cache_size().blocks, 1624); - - } } diff --git a/src/client.rs b/src/client.rs index a83ff554e..28b5ada88 100644 --- a/src/client.rs +++ b/src/client.rs @@ -292,6 +292,18 @@ impl Client { pub fn tick(&self) { self.chain.read().unwrap().collect_garbage(false); } + + /// Minimizes cache used by the client. 
+ pub fn minimize_cache(&self) { + self.chain.read().unwrap().squeeze_to_fit( + CacheSize { + blocks: 0, + block_logs: 0, + transaction_addresses: 0, + block_details: 0, + blocks_blooms: 0 + }); + } } impl BlockChainClient for Client { diff --git a/src/tests/client.rs b/src/tests/client.rs index 57ab6e577..2b8a97551 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -1,8 +1,5 @@ use client::{BlockChainClient,Client}; -use std::env; use super::test_common::*; -use std::path::PathBuf; -use spec::*; use super::helpers::*; #[cfg(test)] @@ -35,14 +32,6 @@ fn get_bad_state_dummy_block() -> Bytes { create_test_block(&block_header) } -#[cfg(test)] -fn create_test_block(header: &Header) -> Bytes { - let mut rlp = RlpStream::new_list(3); - rlp.append(header); - rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); - rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); - rlp.out() -} #[cfg(test)] fn get_test_client_with_blocks(blocks: Vec) -> Arc { @@ -108,4 +97,30 @@ fn returns_chain_info() { let block = BlockView::new(&dummy_block); let info = client.chain_info(); assert_eq!(info.best_block_hash, block.header().hash()); +} + +#[test] +fn imports_block_sequence() { + let client = generate_dummy_client(6); + let block = client.block_header_at(5).unwrap(); + + assert!(!block.is_empty()); +} + + +#[test] +fn can_have_cash_minimized() { + let client = generate_dummy_client(20); + client.minimize_cache(); + assert!(client.cache_info().blocks < 2048); + assert!(client.cache_info().block_details < 4096); + assert_eq!(client.cache_info().block_logs, 0); + assert_eq!(client.cache_info().blocks_blooms, 0); +} + +#[test] +fn can_collect_garbage() { + let client = generate_dummy_client(100); + client.tick(); + assert!(client.cache_info().blocks < 100 * 1024); } \ No newline at end of file diff --git a/src/tests/helpers.rs b/src/tests/helpers.rs index 4cbd36692..7f9b8ba42 100644 --- a/src/tests/helpers.rs +++ b/src/tests/helpers.rs @@ -32,23 +32,50 @@ pub fn get_test_spec() -> Spec { Spec::new_test() } + #[cfg(test)] -pub fn generate_dummy_client(block_number: usize) { +pub fn create_test_block(header: &Header) -> Bytes { + let mut rlp = RlpStream::new_list(3); + rlp.append(header); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1); + rlp.out() +} + +#[cfg(test)] +pub fn generate_dummy_client(block_number: usize) -> Arc { let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let test_spec = get_test_spec(); + let test_engine = test_spec.to_engine().unwrap(); + let state_root = test_engine.spec().genesis_header().state_root; let mut rolling_hash = test_engine.spec().genesis_header().hash(); - let mut rolling_state = test_engine.spec().genesis_header().state_root; let mut rolling_block_number = 1; + let mut rolling_timestamp = 40; for _ in 0..block_number { let mut header = Header::new(); header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap()); header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap()); - header.timestamp = 40; + header.timestamp = rolling_timestamp; header.number = rolling_block_number; - header.parent_hash = test_engine.spec().genesis_header().hash(); - header.state_root = test_engine.spec().genesis_header().state_root; + header.parent_hash = rolling_hash; + header.state_root = state_root.clone(); + + rolling_hash = header.hash(); + rolling_block_number = rolling_block_number + 1; + rolling_timestamp = rolling_timestamp + 10; + + if let Err(_) = 
client.import_block(create_test_block(&header)) { + panic!("error importing block which is valid by definition"); + } + } + client.flush_queue(); + client.import_verified_blocks(&IoChannel::disconnected()); + + client + } \ No newline at end of file From fe0363e230440341d7540444dd295bdbef2b7e16 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 13:28:15 +0100 Subject: [PATCH 104/138] Fix import for bcMultiChainTest. Fixes #223 --- src/block_queue.rs | 14 ++++++++------ src/client.rs | 15 +++++++++------ src/error.rs | 2 +- src/sync/chain.rs | 4 ++-- src/sync/tests.rs | 11 ++++++----- src/tests/chain.rs | 20 +++++++++++++------- 6 files changed, 39 insertions(+), 27 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index e0868c011..dab3a5fe3 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -200,34 +200,36 @@ impl BlockQueue { /// Add a block to the queue. pub fn import_block(&mut self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); - if self.processing.contains(&header.hash()) { + let h = header.hash(); + if self.processing.contains(&h) { return Err(ImportError::AlreadyQueued); } { let mut verification = self.verification.lock().unwrap(); - if verification.bad.contains(&header.hash()) { + if verification.bad.contains(&h) { return Err(ImportError::Bad(None)); } if verification.bad.contains(&header.parent_hash) { - verification.bad.insert(header.hash()); + verification.bad.insert(h.clone()); return Err(ImportError::Bad(None)); } } match verify_block_basic(&header, &bytes, self.engine.deref().deref()) { Ok(()) => { - self.processing.insert(header.hash()); + self.processing.insert(h.clone()); self.verification.lock().unwrap().unverified.push_back(UnVerifiedBlock { header: header, bytes: bytes }); self.more_to_verify.notify_all(); + Ok(h) }, Err(err) => { flushln!("Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); - self.verification.lock().unwrap().bad.insert(header.hash()); + self.verification.lock().unwrap().bad.insert(h.clone()); + Err(From::from(err)) } } - Ok(()) } /// Mark given block and all its children as bad. Stops verification. 
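The block_queue changes above hash the block once, reject duplicates and descendants of known-bad blocks, and return the hash through ImportResult instead of a bare (). A minimal self-contained sketch of that queueing pattern follows; the types here are stand-ins for illustration, not the real ethcore ones.

use std::collections::HashSet;

type BlockHash = [u8; 32];

#[derive(Debug, PartialEq)]
enum ImportError { AlreadyQueued, Bad }

struct Queue {
    processing: HashSet<BlockHash>,
    bad: HashSet<BlockHash>,
}

impl Queue {
    // The hash is computed once by the caller; duplicates and children of bad
    // blocks are rejected, and the hash is handed back on success.
    fn import_block(&mut self, hash: BlockHash, parent: BlockHash) -> Result<BlockHash, ImportError> {
        if self.processing.contains(&hash) {
            return Err(ImportError::AlreadyQueued);
        }
        if self.bad.contains(&hash) || self.bad.contains(&parent) {
            // A block whose parent is known-bad is itself recorded as bad.
            self.bad.insert(hash);
            return Err(ImportError::Bad);
        }
        self.processing.insert(hash);
        Ok(hash)
    }
}

fn main() {
    let mut q = Queue { processing: HashSet::new(), bad: HashSet::new() };
    let h = q.import_block([1u8; 32], [0u8; 32]).unwrap();
    assert_eq!(q.import_block(h, [0u8; 32]), Err(ImportError::AlreadyQueued));
}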
diff --git a/src/client.rs b/src/client.rs index a83ff554e..8aea437e4 100644 --- a/src/client.rs +++ b/src/client.rs @@ -193,7 +193,8 @@ impl Client { } /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&self, _io: &IoChannel) { + pub fn import_verified_blocks(&self, _io: &IoChannel) -> usize { + let mut ret = 0; let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); @@ -211,7 +212,7 @@ impl Client { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); - return; + break; }; let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, @@ -220,7 +221,7 @@ impl Client { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); - return; + break; }, }; // build last hashes @@ -244,14 +245,14 @@ impl Client { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); bad.insert(block.header.hash()); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); - return; + break; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { flushln!("Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); - return; + break; } self.chain.write().unwrap().insert_block(&block.bytes); //TODO: err here? @@ -260,12 +261,14 @@ impl Client { Ok(_) => (), Err(e) => { warn!(target: "client", "State DB commit failed: {:?}", e); - return; + break; } } self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + ret += 1; } + ret } /// Clear cached state overlay diff --git a/src/error.rs b/src/error.rs index bc2bdfe97..cdc0c2d25 100644 --- a/src/error.rs +++ b/src/error.rs @@ -145,7 +145,7 @@ impl From for ImportError { } /// Result of import block operation. -pub type ImportResult = Result<(), ImportError>; +pub type ImportResult = Result; #[derive(Debug)] /// General error type which should be capable of representing all errors in ethcore. 
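With import_verified_blocks now returning how many blocks were imported (and the early returns replaced by break so the count survives a failed block), a caller such as the chain tests can check per batch whether anything actually landed. The simplified sketch below models only that control flow; the slice of Results is a stand-in for the real verification pipeline, not the client API.

// Each entry stands for one verified block: Ok(number) imports cleanly,
// Err(()) fails one of the later verification stages.
fn import_verified_blocks(blocks: &[Result<u64, ()>]) -> usize {
    let mut imported = 0;
    for block in blocks {
        match block {
            Ok(_number) => imported += 1,
            // Breaking rather than returning keeps the count of blocks that
            // did import before the failure, so the caller still sees progress.
            Err(()) => break,
        }
    }
    imported
}

fn main() {
    assert_eq!(import_verified_blocks(&[Ok(1), Ok(2), Ok(3)]), 3);
    assert_eq!(import_verified_blocks(&[Ok(1), Err(()), Ok(3)]), 2);
    assert_eq!(import_verified_blocks(&[Err(())]), 0);
}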
diff --git a/src/sync/chain.rs b/src/sync/chain.rs index aaba701c2..1f79477af 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -415,7 +415,7 @@ impl ChainSync { Err(ImportError::AlreadyQueued) => { trace!(target: "sync", "New block already queued {:?}", h); }, - Ok(()) => { + Ok(_) => { trace!(target: "sync", "New block queued {:?}", h); }, Err(e) => { @@ -680,7 +680,7 @@ impl ChainSync { self.last_imported_block = headers.0 + i as BlockNumber; self.last_imported_hash = h.clone(); }, - Ok(()) => { + Ok(_) => { trace!(target: "sync", "Block queued {:?}", h); self.last_imported_block = headers.0 + i as BlockNumber; self.last_imported_hash = h.clone(); diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 50d6efab2..cc0645b50 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -114,6 +114,7 @@ impl BlockChainClient for TestBlockChainClient { fn import_block(&self, b: Bytes) -> ImportResult { let header = Rlp::new(&b).val_at::(0); + let h = header.hash(); let number: usize = header.number as usize; if number > self.blocks.read().unwrap().len() { panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); @@ -134,9 +135,9 @@ impl BlockChainClient for TestBlockChainClient { let len = self.numbers.read().unwrap().len(); if number == len { *self.difficulty.write().unwrap().deref_mut() += header.difficulty; - mem::replace(self.last_hash.write().unwrap().deref_mut(), header.hash()); - self.blocks.write().unwrap().insert(header.hash(), b); - self.numbers.write().unwrap().insert(number, header.hash()); + mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); + self.blocks.write().unwrap().insert(h.clone(), b); + self.numbers.write().unwrap().insert(number, h.clone()); let mut parent_hash = header.parent_hash; if number > 0 { let mut n = number - 1; @@ -148,9 +149,9 @@ impl BlockChainClient for TestBlockChainClient { } } else { - self.blocks.write().unwrap().insert(header.hash(), b.to_vec()); + self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); } - Ok(()) + Ok(h) } fn queue_info(&self) -> BlockQueueInfo { diff --git a/src/tests/chain.rs b/src/tests/chain.rs index ec899c73b..8ba5e3671 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -21,7 +21,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { flush(format!(" - {}...", name)); - let blocks: Vec = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect(); + let blocks: Vec<(Bytes, bool)> = test["blocks"].as_array().unwrap().iter().map(|e| (xjson!(&e["rlp"]), e.find("blockHeader").is_some())).collect(); let mut spec = ethereum::new_frontier_like_test(); let s = PodState::from_json(test.find("pre").unwrap()); spec.set_genesis_state(s); @@ -32,11 +32,17 @@ fn do_json_test(json_data: &[u8]) -> Vec { dir.push(H32::random().hex()); { let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); - for b in blocks.into_iter().filter(|ref b| Block::is_good(b)) { - client.import_block(b).unwrap(); + for (b, is_valid) in blocks.into_iter() { + let mut hash = H256::new(); + if Block::is_good(&b) { + if let Ok(h) = client.import_block(b.clone()) { + hash = h; + } + } + client.flush_queue(); + let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; + assert_eq!(imported_ok, is_valid); // may yet be invalid for the later stages, so can't do a hard check. 
} - client.flush_queue(); - client.import_verified_blocks(&IoChannel::disconnected()); fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } fs::remove_dir_all(&dir).unwrap(); @@ -55,8 +61,8 @@ declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTes declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // STILL FAILS declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} -declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS -declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS +declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} +declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} From f6d7adf693973bbf92b6d0c7192728e6d31a21af Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 13:29:52 +0100 Subject: [PATCH 105/138] Remove misleading comment. --- src/tests/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 8ba5e3671..894303a4c 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -41,7 +41,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { } client.flush_queue(); let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; - assert_eq!(imported_ok, is_valid); // may yet be invalid for the later stages, so can't do a hard check. 
+ assert_eq!(imported_ok, is_valid); } fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } From 1897d55a791e7ea77e6840d4eb6437bf6a8cc99f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 16:41:41 +0400 Subject: [PATCH 106/138] removed unused cfg options --- src/tests/client.rs | 3 --- src/tests/helpers.rs | 7 +------ 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index 2b8a97551..92bf31c89 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -2,7 +2,6 @@ use client::{BlockChainClient,Client}; use super::test_common::*; use super::helpers::*; -#[cfg(test)] fn get_good_dummy_block() -> Bytes { let mut block_header = Header::new(); let test_spec = get_test_spec(); @@ -17,7 +16,6 @@ fn get_good_dummy_block() -> Bytes { create_test_block(&block_header) } -#[cfg(test)] fn get_bad_state_dummy_block() -> Bytes { let mut block_header = Header::new(); let test_spec = get_test_spec(); @@ -33,7 +31,6 @@ fn get_bad_state_dummy_block() -> Bytes { } -#[cfg(test)] fn get_test_client_with_blocks(blocks: Vec) -> Arc { let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); for block in &blocks { diff --git a/src/tests/helpers.rs b/src/tests/helpers.rs index 7f9b8ba42..4aa2ae8fc 100644 --- a/src/tests/helpers.rs +++ b/src/tests/helpers.rs @@ -5,11 +5,10 @@ use std::path::PathBuf; use spec::*; use std::fs::{create_dir_all}; -#[cfg(test)] + const FIXED_TEMP_DIR_NAME: &'static str = "parity-temp"; -#[cfg(test)] pub fn get_tests_temp_dir() -> PathBuf { let mut dir = env::temp_dir(); dir.push(FIXED_TEMP_DIR_NAME); @@ -19,7 +18,6 @@ pub fn get_tests_temp_dir() -> PathBuf { dir } -#[cfg(test)] pub fn get_random_path() -> PathBuf { let mut dir = get_tests_temp_dir(); dir.push(H32::random().hex()); @@ -27,13 +25,11 @@ pub fn get_random_path() -> PathBuf { } -#[cfg(test)] pub fn get_test_spec() -> Spec { Spec::new_test() } -#[cfg(test)] pub fn create_test_block(header: &Header) -> Bytes { let mut rlp = RlpStream::new_list(3); rlp.append(header); @@ -42,7 +38,6 @@ pub fn create_test_block(header: &Header) -> Bytes { rlp.out() } -#[cfg(test)] pub fn generate_dummy_client(block_number: usize) -> Arc { let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); From 500dd1480dbcd38abac150f0d03e116064c8cca2 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 13:59:14 +0100 Subject: [PATCH 107/138] temporarily comment out checking zero prefixed int --- util/src/bytes.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 3144dd482..b8d440345 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -274,8 +274,8 @@ pub enum FromBytesError { DataIsTooShort, /// TODO [debris] Please document me DataIsTooLong, - /// Integer-representation is non-canonically prefixed with zero byte(s). - ZeroPrefixedInt, + // Integer-representation is non-canonically prefixed with zero byte(s). + //ZeroPrefixedInt, } impl StdError for FromBytesError { @@ -312,9 +312,9 @@ macro_rules! impl_uint_from_bytes { match bytes.len() { 0 => Ok(0), l if l <= mem::size_of::<$to>() => { - if bytes[0] == 0 { - return Err(FromBytesError::ZeroPrefixedInt) - } + //if bytes[0] == 0 { + //return Err(FromBytesError::ZeroPrefixedInt) + //} let mut res = 0 as $to; for i in 0..l { let shift = (l - 1 - i) * 8; @@ -349,9 +349,10 @@ macro_rules! 
impl_uint_from_bytes { ($name: ident) => { impl FromBytes for $name { fn from_bytes(bytes: &[u8]) -> FromBytesResult<$name> { - if !bytes.is_empty() && bytes[0] == 0 { - Err(FromBytesError::ZeroPrefixedInt) - } else if bytes.len() <= $name::SIZE { + //if !bytes.is_empty() && bytes[0] == 0 { + //Err(FromBytesError::ZeroPrefixedInt) + //} else + if bytes.len() <= $name::SIZE { Ok($name::from(bytes)) } else { Err(FromBytesError::DataIsTooLong) From 2f7857520e2f3f7de101b0dc669258e68bb6b777 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 14:09:32 +0100 Subject: [PATCH 108/138] Fix block number check (makme it strict!). --- src/error.rs | 4 +++- src/tests/chain.rs | 15 +++++++++------ src/verification.rs | 10 +++++----- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/error.rs b/src/error.rs index cdc0c2d25..4ca988679 100644 --- a/src/error.rs +++ b/src/error.rs @@ -120,7 +120,9 @@ pub enum BlockError { /// TODO [arkpar] Please document me InvalidParentHash(Mismatch), /// TODO [arkpar] Please document me - InvalidNumber(OutOfBounds), + InvalidNumber(Mismatch), + /// Block number isn't sensible. + RidiculousNumber(OutOfBounds), /// TODO [arkpar] Please document me UnknownParent(H256), /// TODO [Gav Wood] Please document me diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 894303a4c..dfc46ae5a 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -42,6 +42,9 @@ fn do_json_test(json_data: &[u8]) -> Vec { client.flush_queue(); let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; assert_eq!(imported_ok, is_valid); + if imported_ok { + flushln!("Imported {}; best block {}", hash, client.chain_info().best_block_hash); + } } fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } @@ -60,13 +63,13 @@ declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest" declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // STILL FAILS declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // STILL FAILS declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} -declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} +declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} // FAILS AGAIN? declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} -declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} -declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS -declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS -declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // FAILS -declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // FAILS +declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // FAILS AGAIN? 
+declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} +declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} +declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // STILL FAILS +declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} diff --git a/src/verification.rs b/src/verification.rs index 064c0b7d7..158f28f95 100644 --- a/src/verification.rs +++ b/src/verification.rs @@ -162,7 +162,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> /// Check basic header parameters. fn verify_header(header: &Header, engine: &Engine) -> Result<(), Error> { if header.number >= From::from(BlockNumber::max_value()) { - return Err(From::from(BlockError::InvalidNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number }))) + return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number }))) } if header.gas_used > header.gas_limit { return Err(From::from(BlockError::TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit), min: None, found: header.gas_used }))); @@ -186,8 +186,8 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> { if header.timestamp <= parent.timestamp { return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp + 1), found: header.timestamp }))) } - if header.number <= parent.number { - return Err(From::from(BlockError::InvalidNumber(OutOfBounds { max: None, min: Some(parent.number + 1), found: header.number }))); + if header.number != parent.number + 1 { + return Err(From::from(BlockError::InvalidNumber(Mismatch { expected: parent.number + 1, found: header.number }))); } Ok(()) } @@ -400,7 +400,7 @@ mod tests { header = good.clone(); header.number = BlockNumber::max_value(); check_fail(basic_test(&create_test_block(&header), engine.deref()), - InvalidNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number })); + RidiculousNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number })); header = good.clone(); header.gas_used = header.gas_limit + From::from(1); @@ -443,7 +443,7 @@ mod tests { header = good.clone(); header.number = 9; check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), - InvalidNumber(OutOfBounds { max: None, min: Some(parent.number + 1), found: header.number })); + InvalidNumber(Mismatch { expected: parent.number + 1, found: header.number })); header = good.clone(); let mut bad_uncles = good_uncles.clone(); From 16138533bc0fcef75a322e2bf7e78f103bc4c206 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 14:10:52 +0100 Subject: [PATCH 109/138] Remove miseading comment. 
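The `verify_parent` change above turns the block-number check from a lower bound into an exact-successor requirement. A standalone illustration, deliberately using plain integers rather than the `Header` type:

    // Illustration only: the rule verify_parent now enforces for block numbers.
    // The old check rejected only `child <= parent`; the new one demands the exact successor.
    fn is_valid_child_number(parent_number: u64, child_number: u64) -> bool {
        child_number == parent_number + 1
    }

    #[test]
    fn child_number_must_be_exact_successor() {
        assert!(is_valid_child_number(8, 9));
        assert!(!is_valid_child_number(8, 10)); // a gap passed the old bound, now InvalidNumber(Mismatch)
        assert!(!is_valid_child_number(8, 8));  // rejected before and after
    }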
--- src/tests/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index dfc46ae5a..661968764 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -63,7 +63,7 @@ declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest" declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // STILL FAILS declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // STILL FAILS declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} -declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} // FAILS AGAIN? +declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} From d2cd6f69cbb1bb25c16d84988419c57f09093eed Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 17:21:54 +0400 Subject: [PATCH 110/138] added temp directory struct with drop --- src/tests/client.rs | 15 ++++++++++----- src/tests/helpers.rs | 42 ++++++++++++++++++++++++++---------------- 2 files changed, 36 insertions(+), 21 deletions(-) diff --git a/src/tests/client.rs b/src/tests/client.rs index 92bf31c89..f6887cb9c 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -32,7 +32,8 @@ fn get_bad_state_dummy_block() -> Bytes { fn get_test_client_with_blocks(blocks: Vec) -> Arc { - let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let dir = RandomTempPath::new(); + let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); for block in &blocks { if let Err(_) = client.import_block(block.clone()) { panic!("panic importing block which is well-formed"); @@ -46,20 +47,23 @@ fn get_test_client_with_blocks(blocks: Vec) -> Arc { #[test] fn created() { - let client_result = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()); + let dir = RandomTempPath::new(); + let client_result = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()); assert!(client_result.is_ok()); } #[test] fn imports_from_empty() { - let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let dir = RandomTempPath::new(); + let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); client.import_verified_blocks(&IoChannel::disconnected()); client.flush_queue(); } #[test] fn imports_good_block() { - let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let dir = RandomTempPath::new(); + let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); let good_block = get_good_dummy_block(); if let Err(_) = client.import_block(good_block) { panic!("error importing block being good by definition"); @@ -73,7 +77,8 @@ fn imports_good_block() { #[test] fn query_none_block() { - let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let dir = RandomTempPath::new(); + let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); let non_existant = client.block_header_at(188); assert!(non_existant.is_none()); diff --git a/src/tests/helpers.rs 
b/src/tests/helpers.rs index 4aa2ae8fc..e7c717f42 100644 --- a/src/tests/helpers.rs +++ b/src/tests/helpers.rs @@ -3,25 +3,33 @@ use std::env; use super::test_common::*; use std::path::PathBuf; use spec::*; -use std::fs::{create_dir_all}; +use std::fs::{remove_dir_all}; -const FIXED_TEMP_DIR_NAME: &'static str = "parity-temp"; - - -pub fn get_tests_temp_dir() -> PathBuf { - let mut dir = env::temp_dir(); - dir.push(FIXED_TEMP_DIR_NAME); - if let Err(_) = create_dir_all(&dir) { - panic!("failed to create test dir!"); - } - dir +pub struct RandomTempPath { + path: PathBuf } -pub fn get_random_path() -> PathBuf { - let mut dir = get_tests_temp_dir(); - dir.push(H32::random().hex()); - dir +impl RandomTempPath { + pub fn new() -> RandomTempPath { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + RandomTempPath { + path: dir.clone() + } + } + + pub fn as_path(&self) -> &PathBuf { + &self.path + } +} + +impl Drop for RandomTempPath { + fn drop(&mut self) { + if let Err(e) = remove_dir_all(self.as_path()) { + panic!("failed to remove temp directory, probably something failed to destroyed ({})", e); + } + } } @@ -39,7 +47,9 @@ pub fn create_test_block(header: &Header) -> Bytes { } pub fn generate_dummy_client(block_number: usize) -> Arc { - let client = Client::new(get_test_spec(), &get_random_path(), IoChannel::disconnected()).unwrap(); + let dir = RandomTempPath::new(); + + let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); let test_spec = get_test_spec(); let test_engine = test_spec.to_engine().unwrap(); From 322c1a6cb27ab9a9c2a691e84b1f96a8123b831a Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 14:25:12 +0100 Subject: [PATCH 111/138] use jsonrpc 1.1, moved params deserialization to jsonrpc-core --- rpc/Cargo.toml | 4 +-- rpc/src/impls/eth.rs | 66 ++++++++++++++++++++---------------------- rpc/src/types/block.rs | 10 ------- rpc/src/types/mod.rs | 15 ---------- 4 files changed, 33 insertions(+), 62 deletions(-) diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index ccdd46679..1f10180d6 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -11,8 +11,8 @@ authors = ["Marek Kotewicz , @@ -30,7 +30,7 @@ impl Eth for EthClient { fn author(&self, params: Params) -> Result { match params { - Params::None => Ok(to_value(&Address::new())), + Params::None => to_value(&Address::new()), _ => Err(Error::invalid_params()) } } @@ -68,39 +68,35 @@ impl Eth for EthClient { } fn block(&self, params: Params) -> Result { - if let Params::Array(ref arr) = params { - if let [ref h, Value::Bool(ref _include_txs)] = arr as &[Value] { - if let Ok(hash) = from_value::(h.clone()) { - return match (self.client.block_header(&hash), self.client.block_details(&hash)) { - (Some(bytes), Some(details)) => { - let view = HeaderView::new(&bytes); - let block = Block { - hash: view.sha3(), - parent_hash: view.parent_hash(), - uncles_hash: view.uncles_hash(), - author: view.author(), - miner: view.author(), - state_root: view.state_root(), - transactions_root: view.transactions_root(), - receipts_root: view.receipts_root(), - number: U256::from(view.number()), - gas_used: view.gas_used(), - gas_limit: view.gas_limit(), - logs_bloom: view.log_bloom(), - timestamp: U256::from(view.timestamp()), - difficulty: view.difficulty(), - total_difficulty: details.total_difficulty, - uncles: vec![], - transactions: vec![] - }; - Ok(to_value(&block)) - }, - _ => Ok(Value::Null), - } - } - } + match from_params::<(H256, bool)>(params) { + Ok((hash, _include_txs)) => match 
(self.client.block_header(&hash), self.client.block_details(&hash)) { + (Some(bytes), Some(details)) => { + let view = HeaderView::new(&bytes); + let block = Block { + hash: view.sha3(), + parent_hash: view.parent_hash(), + uncles_hash: view.uncles_hash(), + author: view.author(), + miner: view.author(), + state_root: view.state_root(), + transactions_root: view.transactions_root(), + receipts_root: view.receipts_root(), + number: U256::from(view.number()), + gas_used: view.gas_used(), + gas_limit: view.gas_limit(), + logs_bloom: view.log_bloom(), + timestamp: U256::from(view.timestamp()), + difficulty: view.difficulty(), + total_difficulty: details.total_difficulty, + uncles: vec![], + transactions: vec![] + }; + to_value(&block) + }, + _ => Ok(Value::Null) + }, + Err(err) => Err(err) } - Err(Error::invalid_params()) } } @@ -126,6 +122,6 @@ impl EthFilter for EthFilterClient { } fn filter_changes(&self, _: Params) -> Result { - Ok(Value::Array(vec![to_value(&self.client.chain_info().best_block_hash)])) + to_value(&self.client.chain_info().best_block_hash).map(|v| Value::Array(vec![v])) } } diff --git a/rpc/src/types/block.rs b/rpc/src/types/block.rs index ac1e673a5..740cf3e09 100644 --- a/rpc/src/types/block.rs +++ b/rpc/src/types/block.rs @@ -34,13 +34,3 @@ pub struct Block { pub uncles: Vec, pub transactions: Vec } - -#[test] -fn test_block_serialize() { - use serde_json; - - let block = Block::default(); - let serialized = serde_json::to_string(&block).unwrap(); - println!("s: {:?}", serialized); - //assert!(false); -} diff --git a/rpc/src/types/mod.rs b/rpc/src/types/mod.rs index 0b7d97916..7be32e84d 100644 --- a/rpc/src/types/mod.rs +++ b/rpc/src/types/mod.rs @@ -1,18 +1,3 @@ -use serde::{Serialize, Deserialize, de}; -use serde_json::value::{Value, Serializer, Deserializer}; - mod block; -pub fn to_value(s: &S) -> Value where S: Serialize { - let mut serializer = Serializer::new(); - // should never panic! - s.serialize(&mut serializer).unwrap(); - serializer.unwrap() -} - -pub fn from_value(value: Value) -> Result::Error> where D: Deserialize { - let mut deserialier = Deserializer::new(value); - Deserialize::deserialize(&mut deserialier) -} - pub use self::block::Block; From e068bad4e0488bf090c3590cbe1a171565af627e Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 14:31:43 +0100 Subject: [PATCH 112/138] Revert "temporarily comment out checking zero prefixed int" This reverts commit 500dd1480dbcd38abac150f0d03e116064c8cca2. --- util/src/bytes.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/util/src/bytes.rs b/util/src/bytes.rs index b8d440345..3144dd482 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -274,8 +274,8 @@ pub enum FromBytesError { DataIsTooShort, /// TODO [debris] Please document me DataIsTooLong, - // Integer-representation is non-canonically prefixed with zero byte(s). - //ZeroPrefixedInt, + /// Integer-representation is non-canonically prefixed with zero byte(s). + ZeroPrefixedInt, } impl StdError for FromBytesError { @@ -312,9 +312,9 @@ macro_rules! impl_uint_from_bytes { match bytes.len() { 0 => Ok(0), l if l <= mem::size_of::<$to>() => { - //if bytes[0] == 0 { - //return Err(FromBytesError::ZeroPrefixedInt) - //} + if bytes[0] == 0 { + return Err(FromBytesError::ZeroPrefixedInt) + } let mut res = 0 as $to; for i in 0..l { let shift = (l - 1 - i) * 8; @@ -349,10 +349,9 @@ macro_rules! 
impl_uint_from_bytes { ($name: ident) => { impl FromBytes for $name { fn from_bytes(bytes: &[u8]) -> FromBytesResult<$name> { - //if !bytes.is_empty() && bytes[0] == 0 { - //Err(FromBytesError::ZeroPrefixedInt) - //} else - if bytes.len() <= $name::SIZE { + if !bytes.is_empty() && bytes[0] == 0 { + Err(FromBytesError::ZeroPrefixedInt) + } else if bytes.len() <= $name::SIZE { Ok($name::from(bytes)) } else { Err(FromBytesError::DataIsTooLong) From 1402fd5c4c455e95ad0d1ed87fa3f1bf2f028454 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 14:32:10 +0100 Subject: [PATCH 113/138] updated eth filter comment --- rpc/src/traits/eth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/src/traits/eth.rs b/rpc/src/traits/eth.rs index 35b59a91c..63aadbc74 100644 --- a/rpc/src/traits/eth.rs +++ b/rpc/src/traits/eth.rs @@ -44,7 +44,7 @@ pub trait Eth: Sized + Send + Sync + 'static { } } -// TODO: do filters api properly if we commit outselves to polling again... +// TODO: do filters api properly pub trait EthFilter: Sized + Send + Sync + 'static { /// Returns id of new block filter fn new_block_filter(&self, _: Params) -> Result { rpcerr!() } From 7ffe9344ed730b2b6981dc46608a6e147c0f4f01 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 14:43:43 +0100 Subject: [PATCH 114/138] replaced client block_details with block_total_difficulty --- rpc/src/impls/eth.rs | 6 +++--- src/client.rs | 16 +++++++++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/rpc/src/impls/eth.rs b/rpc/src/impls/eth.rs index af71ac0d5..ac27111d6 100644 --- a/rpc/src/impls/eth.rs +++ b/rpc/src/impls/eth.rs @@ -69,8 +69,8 @@ impl Eth for EthClient { fn block(&self, params: Params) -> Result { match from_params::<(H256, bool)>(params) { - Ok((hash, _include_txs)) => match (self.client.block_header(&hash), self.client.block_details(&hash)) { - (Some(bytes), Some(details)) => { + Ok((hash, _include_txs)) => match (self.client.block_header(&hash), self.client.block_total_difficulty(&hash)) { + (Some(bytes), Some(total_difficulty)) => { let view = HeaderView::new(&bytes); let block = Block { hash: view.sha3(), @@ -87,7 +87,7 @@ impl Eth for EthClient { logs_bloom: view.log_bloom(), timestamp: U256::from(view.timestamp()), difficulty: view.difficulty(), - total_difficulty: details.total_difficulty, + total_difficulty: total_difficulty, uncles: vec![], transactions: vec![] }; diff --git a/src/client.rs b/src/client.rs index 50c4e3f81..0d0fcae95 100644 --- a/src/client.rs +++ b/src/client.rs @@ -13,7 +13,6 @@ use service::NetSyncMessage; use env_info::LastHashes; use verification::*; use block::*; -use extras::BlockDetails; /// General block status #[derive(Debug)] @@ -67,8 +66,8 @@ pub trait BlockChainClient : Sync + Send { /// Get block status by block header hash. fn block_status(&self, hash: &H256) -> BlockStatus; - /// Get familial details concerning a block. - fn block_details(&self, hash: &H256) -> Option; + /// Get block total difficulty. + fn block_total_difficulty(&self, hash: &H256) -> Option; /// Get raw block header data by block number. fn block_header_at(&self, n: BlockNumber) -> Option; @@ -83,6 +82,9 @@ pub trait BlockChainClient : Sync + Send { /// Get block status by block number. fn block_status_at(&self, n: BlockNumber) -> BlockStatus; + /// Get block total difficulty. + fn block_total_difficulty_at(&self, n: BlockNumber) -> Option; + /// Get a tree route between `from` and `to`. /// See `BlockChain::tree_route`. 
fn tree_route(&self, from: &H256, to: &H256) -> Option; @@ -321,8 +323,8 @@ impl BlockChainClient for Client { if self.chain.read().unwrap().is_known(&hash) { BlockStatus::InChain } else { BlockStatus::Unknown } } - fn block_details(&self, hash: &H256) -> Option { - self.chain.read().unwrap().block_details(hash) + fn block_total_difficulty(&self, hash: &H256) -> Option { + self.chain.read().unwrap().block_details(hash).map(|d| d.total_difficulty) } fn block_header_at(&self, n: BlockNumber) -> Option { @@ -344,6 +346,10 @@ impl BlockChainClient for Client { } } + fn block_total_difficulty_at(&self, n: BlockNumber) -> Option { + self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_total_difficulty(&h)) + } + fn tree_route(&self, from: &H256, to: &H256) -> Option { self.chain.read().unwrap().tree_route(from.clone(), to.clone()) } From 9979e159bd0f962b927e25a571e9a266f03fc12b Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 14:44:02 +0100 Subject: [PATCH 115/138] Ethash nonce is H64 not a u64. --- src/ethereum/ethash.rs | 8 ++++---- util/src/hash.rs | 12 +++++++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index a677a86cc..019d764df 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -101,7 +101,7 @@ impl Engine for Ethash { fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // check the seal fields. try!(UntrustedRlp::new(&header.seal[0]).as_val::()); - try!(UntrustedRlp::new(&header.seal[1]).as_val::()); + try!(UntrustedRlp::new(&header.seal[1]).as_val::()); let min_difficulty = decode(self.spec().engine_params.get("minimumDifficulty").unwrap()); if header.difficulty < min_difficulty { @@ -109,7 +109,7 @@ impl Engine for Ethash { } let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty( &Ethash::to_ethash(header.bare_hash()), - header.nonce(), + header.nonce().low_u64(), &Ethash::to_ethash(header.mix_hash())))); if difficulty < header.difficulty { return Err(From::from(BlockError::InvalidEthashDifficulty(Mismatch { expected: header.difficulty, found: difficulty }))); @@ -118,7 +118,7 @@ impl Engine for Ethash { } fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { - let result = self.pow.compute_light(header.number as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce()); + let result = self.pow.compute_light(header.number as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64()); let mix = Ethash::from_ethash(result.mix_hash); let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(result.value)); if mix != header.mix_hash() { @@ -208,7 +208,7 @@ impl Ethash { } impl Header { - fn nonce(&self) -> u64 { + fn nonce(&self) -> H64 { decode(&self.seal()[1]) } fn mix_hash(&self) -> H256 { diff --git a/util/src/hash.rs b/util/src/hash.rs index 252877a24..8b55c01cd 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -41,6 +41,8 @@ pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default fn contains<'a>(&'a self, b: &'a Self) -> bool; /// TODO [debris] Please document me fn is_zero(&self) -> bool; + /// Return the lowest 8 bytes interpreted as a BigEndian integer. + fn low_u64(&self) -> u64; } fn clean_0x(s: &str) -> &str { @@ -71,8 +73,8 @@ macro_rules! 
impl_hash { &self.0 } } - impl DerefMut for $from { + impl DerefMut for $from { #[inline] fn deref_mut(&mut self) -> &mut [u8] { &mut self.0 @@ -190,6 +192,14 @@ macro_rules! impl_hash { fn is_zero(&self) -> bool { self.eq(&Self::new()) } + + fn low_u64(&self) -> u64 { + let mut ret = 0u64; + for i in 0..min($size, 8) { + ret |= (self.0[$size - 1 - i] as u64) << (i * 8); + } + ret + } } impl FromStr for $from { From a012043e18365ec6e0192bad2f54229bc6c4ac50 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 14:55:50 +0100 Subject: [PATCH 116/138] Fixes #226. --- src/tests/chain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 661968764..97dc2767f 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -68,8 +68,8 @@ declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTes declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} -declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // FAILS AGAIN? +declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // FAILS: ZeroPrefixed Int. declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} -declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // STILL FAILS +declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} From d7bfc6f10b511ab365ac72f91376651cf0b571f1 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 15:01:59 +0100 Subject: [PATCH 117/138] Remove misleading comment. --- src/tests/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 97dc2767f..0cf13acb4 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -68,7 +68,7 @@ declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTes declare_test!{BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"} declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} -declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} // FAILS: ZeroPrefixed Int. 
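Because the seal nonce is now decoded as an `H64` and only narrowed where ethash needs a `u64`, the `low_u64` helper added to `impl_hash` above does that narrowing. A standalone restatement of the same big-endian fold over a plain byte slice, for illustration:

    // Fold the trailing (lowest-order) eight bytes, big-endian, into a u64,
    // mirroring the low_u64 loop added to the impl_hash macro above.
    fn low_u64_be(bytes: &[u8]) -> u64 {
        let size = bytes.len();
        let mut ret = 0u64;
        for i in 0..std::cmp::min(size, 8) {
            ret |= (bytes[size - 1 - i] as u64) << (i * 8);
        }
        ret
    }

    #[test]
    fn low_u64_takes_trailing_bytes() {
        // an H64-style nonce 0x0000000000000102 narrows to 0x0102
        assert_eq!(low_u64_be(&[0, 0, 0, 0, 0, 0, 1, 2]), 0x0102);
        // for wider hashes only the last eight bytes contribute
        assert_eq!(low_u64_be(&[0xff, 0, 0, 0, 0, 0, 0, 0, 0, 1]), 1);
    }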
+declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"} declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} From f1edf627ae18a7ad54098373cae8df8facf1581f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 19:01:33 +0400 Subject: [PATCH 118/138] fixed notes --- src/blockchain.rs | 1 - src/client.rs | 11 ----------- src/tests/client.rs | 11 ----------- 3 files changed, 23 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index a0b554bcc..da9ee04c2 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -778,5 +778,4 @@ mod tests { assert_eq!(bc.best_block_hash(), b1_hash); } } - } diff --git a/src/client.rs b/src/client.rs index 28b5ada88..9aa9854b6 100644 --- a/src/client.rs +++ b/src/client.rs @@ -293,17 +293,6 @@ impl Client { self.chain.read().unwrap().collect_garbage(false); } - /// Minimizes cache used by the client. - pub fn minimize_cache(&self) { - self.chain.read().unwrap().squeeze_to_fit( - CacheSize { - blocks: 0, - block_logs: 0, - transaction_addresses: 0, - block_details: 0, - blocks_blooms: 0 - }); - } } impl BlockChainClient for Client { diff --git a/src/tests/client.rs b/src/tests/client.rs index f6887cb9c..f6d603f43 100644 --- a/src/tests/client.rs +++ b/src/tests/client.rs @@ -109,17 +109,6 @@ fn imports_block_sequence() { assert!(!block.is_empty()); } - -#[test] -fn can_have_cash_minimized() { - let client = generate_dummy_client(20); - client.minimize_cache(); - assert!(client.cache_info().blocks < 2048); - assert!(client.cache_info().block_details < 4096); - assert_eq!(client.cache_info().block_logs, 0); - assert_eq!(client.cache_info().blocks_blooms, 0); -} - #[test] fn can_collect_garbage() { let client = generate_dummy_client(100); From 7e7b2880957442adc83732ecd8eef21193b251af Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 16:02:03 +0100 Subject: [PATCH 119/138] Fixes #221 --- src/tests/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 0cf13acb4..44e30e7ea 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -61,7 +61,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // STILL FAILS -declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // STILL FAILS +declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} declare_test!{BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} From d4c76eed0f55141bdfbaa0a9f074c6cb94eebc77 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 16:05:23 +0100 Subject: [PATCH 120/138] Fixes #220. 
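With `minimize_cache` and the `can_have_cash_minimized` test removed above, the only cache-management entry point left on `Client` is `tick`, which simply forwards to `BlockChain::collect_garbage(false)`. A sketch of the remaining usage, reusing helper names from the tests and not the literal body of `can_collect_garbage`:

    // Sketch: periodic pruning is now driven entirely by tick(); the chain decides what to evict.
    let client = generate_dummy_client(100);
    client.tick();
    // callers only observe the effect through the cache statistics
    println!("blocks cached after tick: {}", client.cache_info().blocks);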
--- src/tests/chain.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 44e30e7ea..babd2a42f 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -42,9 +42,6 @@ fn do_json_test(json_data: &[u8]) -> Vec { client.flush_queue(); let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; assert_eq!(imported_ok, is_valid); - if imported_ok { - flushln!("Imported {}; best block {}", hash, client.chain_info().best_block_hash); - } } fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } @@ -60,7 +57,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} -declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // STILL FAILS +declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} declare_test!{BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"} declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"} From 0312c08494e6779829c42393a3ed96b5f8dfa454 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 19:07:35 +0400 Subject: [PATCH 121/138] spaces fix --- src/client.rs | 1 - src/tests/helpers.rs | 6 ------ 2 files changed, 7 deletions(-) diff --git a/src/client.rs b/src/client.rs index 9aa9854b6..a83ff554e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -292,7 +292,6 @@ impl Client { pub fn tick(&self) { self.chain.read().unwrap().collect_garbage(false); } - } impl BlockChainClient for Client { diff --git a/src/tests/helpers.rs b/src/tests/helpers.rs index e7c717f42..a566392cc 100644 --- a/src/tests/helpers.rs +++ b/src/tests/helpers.rs @@ -32,12 +32,10 @@ impl Drop for RandomTempPath { } } - pub fn get_test_spec() -> Spec { Spec::new_test() } - pub fn create_test_block(header: &Header) -> Bytes { let mut rlp = RlpStream::new_list(3); rlp.append(header); @@ -50,7 +48,6 @@ pub fn generate_dummy_client(block_number: usize) -> Arc { let dir = RandomTempPath::new(); let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap(); - let test_spec = get_test_spec(); let test_engine = test_spec.to_engine().unwrap(); let state_root = test_engine.spec().genesis_header().state_root; @@ -77,10 +74,7 @@ pub fn generate_dummy_client(block_number: usize) -> Arc { } } - client.flush_queue(); client.import_verified_blocks(&IoChannel::disconnected()); - client - } \ No newline at end of file From 0cd2e855698ada995467b48767a2462e521dafa5 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 16:13:21 +0100 Subject: [PATCH 122/138] Remove flushlns and tidy ready for primetime. 
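`generate_dummy_client` above leans on the RAII behaviour of `RandomTempPath` introduced earlier in the series: `new` builds a unique path under the OS temp directory and `Drop` removes whatever was created there. A sketch of the pattern, assuming only the helper's public surface (`new` and `as_path`):

    // Sketch of the temp-path pattern used by the test helpers.
    {
        let dir = RandomTempPath::new();   // unique path under env::temp_dir()
        let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
        // ... drive `client` against the throwaway database ...
    }   // `dir` drops here and its Drop impl runs remove_dir_all on the path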
--- src/block_queue.rs | 2 -- src/client.rs | 5 ----- src/tests/chain.rs | 5 +---- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index dab3a5fe3..5ce649710 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -158,7 +158,6 @@ impl BlockQueue { }, Err(err) => { let mut v = verification.lock().unwrap(); - flushln!("Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); v.bad.insert(block_hash.clone()); v.verifying.retain(|e| e.hash != block_hash); @@ -224,7 +223,6 @@ impl BlockQueue { Ok(h) }, Err(err) => { - flushln!("Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); self.verification.lock().unwrap().bad.insert(h.clone()); Err(From::from(err)) diff --git a/src/client.rs b/src/client.rs index 8aea437e4..33ba19147 100644 --- a/src/client.rs +++ b/src/client.rs @@ -199,7 +199,6 @@ impl Client { let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); for block in blocks { -// flushln!("Importing {}...", block.header.hash()); if bad.contains(&block.header.parent_hash) { self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); @@ -208,7 +207,6 @@ impl Client { let header = &block.header; if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { - flushln!("Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); @@ -217,7 +215,6 @@ impl Client { let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, None => { - flushln!("Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); @@ -241,7 +238,6 @@ impl Client { let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { - flushln!("Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); bad.insert(block.header.hash()); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); @@ -249,7 +245,6 @@ impl Client { } }; if let Err(e) = verify_block_final(&header, result.block().header()) { - flushln!("Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.write().unwrap().mark_as_bad(&header.hash()); break; diff --git a/src/tests/chain.rs b/src/tests/chain.rs index babd2a42f..42b4ee78a 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -33,11 +33,8 @@ fn 
do_json_test(json_data: &[u8]) -> Vec { { let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); for (b, is_valid) in blocks.into_iter() { - let mut hash = H256::new(); if Block::is_good(&b) { - if let Ok(h) = client.import_block(b.clone()) { - hash = h; - } + let _ = client.import_block(b.clone()); } client.flush_queue(); let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; From 765666faedd2932bb2a89c178b3554989ab8567b Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 16:37:32 +0100 Subject: [PATCH 123/138] Rwmove squeeze_to_fit --- src/blockchain.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index da9ee04c2..fb3755ff2 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -565,15 +565,6 @@ impl BlockChain { } } - /// Tries to squeeze the cache if its too big. - pub fn squeeze_to_fit(&self, size: CacheSize) { - self.blocks.write().unwrap().squeeze(size.blocks); - self.block_details.write().unwrap().squeeze(size.block_details); - self.transaction_addresses.write().unwrap().squeeze(size.transaction_addresses); - self.block_logs.write().unwrap().squeeze(size.block_logs); - self.blocks_blooms.write().unwrap().squeeze(size.blocks_blooms); - } - /// Let the cache system know that a cacheable item has been used. fn note_used(&self, id: CacheID) { let mut cache_man = self.cache_man.write().unwrap(); From 1c67dfdaf9de4851d210833940531da6293f5c51 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 16:41:15 +0100 Subject: [PATCH 124/138] Fix network test. --- src/sync/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 78a5d7613..3223b69bf 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -9,13 +9,13 @@ /// extern crate ethcore; /// use std::env; /// use std::sync::Arc; -/// use util::network::NetworkService; +/// use util::network::{NetworkService,NetworkConfiguration}; /// use ethcore::client::Client; /// use ethcore::sync::EthSync; /// use ethcore::ethereum; /// /// fn main() { -/// let mut service = NetworkService::start().unwrap(); +/// let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); /// let dir = env::temp_dir(); /// let client = Client::new(ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); /// EthSync::register(&mut service, client); From 8a450592b49e92de42cc151f7de86c41dde2e95d Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 27 Jan 2016 19:41:50 +0400 Subject: [PATCH 125/138] refactoring of temp path spawning in several places --- src/blockchain.rs | 22 ++++++++-------------- src/tests/chain.rs | 8 +++----- src/tests/mod.rs | 2 +- 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index fb3755ff2..753b83587 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -618,20 +618,18 @@ impl BlockChain { #[cfg(test)] mod tests { - use std::env; use std::str::FromStr; use rustc_serialize::hex::FromHex; use util::hash::*; use blockchain::*; + use tests::helpers::*; #[test] fn valid_tests_extra32() { let genesis = 
"f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0925002c3260b44e44c3edebad1cc442142b03020209df1ab8bb86752edbd2cd7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0363659b251bf8b819179874c8cce7b9b983d7f3704cbb58a3b334431f7032871889032d09c281e1236c0c0".from_hex().unwrap(); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - - let bc = BlockChain::new(&genesis, &dir); + let temp = RandomTempPath::new(); + let bc = BlockChain::new(&genesis, temp.as_path()); let genesis_hash = H256::from_str("3caa2203f3d7c136c0295ed128a7d31cea520b1ca5e27afe17d0853331798942").unwrap(); @@ -674,10 +672,8 @@ mod tests { // b3a is a part of canon chain, whereas b3b is part of sidechain let best_block_hash = H256::from_str("c208f88c9f5bf7e00840439742c12e5226d9752981f3ec0521bdcb6dd08af277").unwrap(); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - - let bc = BlockChain::new(&genesis, &dir); + let temp = RandomTempPath::new(); + let bc = BlockChain::new(&genesis, temp.as_path()); bc.insert_block(&b1); bc.insert_block(&b2); bc.insert_block(&b3a); @@ -754,18 +750,16 @@ mod tests { let genesis_hash = H256::from_str("5716670833ec874362d65fea27a7cd35af5897d275b31a44944113111e4e96d2").unwrap(); let b1_hash = H256::from_str("437e51676ff10756fcfee5edd9159fa41dbcb1b2c592850450371cbecd54ee4f").unwrap(); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - + let temp = RandomTempPath::new(); { - let bc = BlockChain::new(&genesis, &dir); + let bc = BlockChain::new(&genesis, temp.as_path()); assert_eq!(bc.best_block_hash(), genesis_hash); bc.insert_block(&b1); assert_eq!(bc.best_block_hash(), b1_hash); } { - let bc = BlockChain::new(&genesis, &dir); + let bc = BlockChain::new(&genesis, temp.as_path()); assert_eq!(bc.best_block_hash(), b1_hash); } } diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 42b4ee78a..295cd3e91 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -1,9 +1,9 @@ -use std::env; use super::test_common::*; use client::{BlockChainClient,Client}; use pod_state::*; use block::Block; use ethereum; +use super::helpers::*; fn do_json_test(json_data: &[u8]) -> Vec { let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid"); @@ -28,10 +28,9 @@ fn do_json_test(json_data: &[u8]) -> Vec { spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); assert!(spec.is_state_root_valid()); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); + let temp = RandomTempPath::new(); { - let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); + let client = Client::new(spec, temp.as_path(), IoChannel::disconnected()).unwrap(); for (b, is_valid) in blocks.into_iter() { if Block::is_good(&b) { let _ = client.import_block(b.clone()); @@ -42,7 
+41,6 @@ fn do_json_test(json_data: &[u8]) -> Vec { } fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); } - fs::remove_dir_all(&dir).unwrap(); } if !fail { flush(format!("ok\n")); diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 17da72d77..19e8e9144 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -6,4 +6,4 @@ mod executive; mod state; mod client; mod chain; -mod helpers; \ No newline at end of file +pub mod helpers; \ No newline at end of file From f7a0f1056803fe6ef819040a236d04872f5aa332 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 16:58:53 +0100 Subject: [PATCH 126/138] missing methods --- src/sync/tests.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 50d6efab2..646933122 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -51,6 +51,10 @@ impl TestBlockChainClient { } impl BlockChainClient for TestBlockChainClient { + fn block_total_difficulty(&self, _h: &H256) -> Option { + unimplemented!(); + } + fn block_header(&self, h: &H256) -> Option { self.blocks.read().unwrap().get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec()) @@ -76,6 +80,10 @@ impl BlockChainClient for TestBlockChainClient { } } + fn block_total_difficulty_at(&self, _number: BlockNumber) -> Option { + unimplemented!(); + } + fn block_header_at(&self, n: BlockNumber) -> Option { self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_header(h)) } From 61c64d264b6ee81c9f11be3bfc429f1d49ffa995 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 17:08:59 +0100 Subject: [PATCH 127/138] moved rust-evmjit -> evmjit, added clippy and basic docs to rpc crate --- Cargo.toml | 2 +- bin/src/main.rs | 1 - {rust-evmjit => evmjit}/.gitignore | 0 {rust-evmjit => evmjit}/Cargo.toml | 0 {rust-evmjit => evmjit}/src/lib.rs | 0 rpc/Cargo.toml | 1 + rpc/src/impls/eth.rs | 5 +++++ rpc/src/impls/net.rs | 2 ++ rpc/src/impls/web3.rs | 3 +++ rpc/src/lib.rs | 8 +++++++- rpc/src/traits/eth.rs | 1 + util/Cargo.toml | 2 +- 12 files changed, 21 insertions(+), 4 deletions(-) rename {rust-evmjit => evmjit}/.gitignore (100%) rename {rust-evmjit => evmjit}/Cargo.toml (100%) rename {rust-evmjit => evmjit}/src/lib.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 489c1f27e..872e1e675 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ heapsize = "0.2.0" rust-crypto = "0.2.34" time = "0.1" #interpolate_idents = { git = "https://github.com/SkylerLipthay/interpolate_idents" } -evmjit = { path = "rust-evmjit", optional = true } +evmjit = { path = "evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" clippy = "0.0.37" diff --git a/bin/src/main.rs b/bin/src/main.rs index 942a5cf24..402a0c7c1 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -1,6 +1,5 @@ #![feature(plugin)] #![plugin(docopt_macros)] -// required for serde, move it to a separate library extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; diff --git a/rust-evmjit/.gitignore b/evmjit/.gitignore similarity index 100% rename from rust-evmjit/.gitignore rename to evmjit/.gitignore diff --git a/rust-evmjit/Cargo.toml b/evmjit/Cargo.toml similarity index 100% rename from rust-evmjit/Cargo.toml rename to evmjit/Cargo.toml diff --git a/rust-evmjit/src/lib.rs b/evmjit/src/lib.rs similarity index 100% rename from rust-evmjit/src/lib.rs rename to evmjit/src/lib.rs diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 1f10180d6..ab7072c85 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ 
-15,4 +15,5 @@ jsonrpc-core = "1.1" jsonrpc-http-server = "1.1" ethcore-util = { path = "../util" } ethcore = { path = ".." } +clippy = "0.0.37" diff --git a/rpc/src/impls/eth.rs b/rpc/src/impls/eth.rs index ac27111d6..91c05541a 100644 --- a/rpc/src/impls/eth.rs +++ b/rpc/src/impls/eth.rs @@ -1,3 +1,4 @@ +//! Eth rpc implementation. use std::sync::Arc; use jsonrpc_core::*; use util::hash::*; @@ -8,11 +9,13 @@ use ethcore::views::*; use traits::{Eth, EthFilter}; use types::Block; +/// Eth rpc implementation. pub struct EthClient { client: Arc, } impl EthClient { + /// Creates new EthClient. pub fn new(client: Arc) -> Self { EthClient { client: client @@ -100,11 +103,13 @@ impl Eth for EthClient { } } +/// Eth filter rpc implementation. pub struct EthFilterClient { client: Arc } impl EthFilterClient { + /// Creates new Eth filter client. pub fn new(client: Arc) -> Self { EthFilterClient { client: client diff --git a/rpc/src/impls/net.rs b/rpc/src/impls/net.rs index a1d36de54..20ed4d077 100644 --- a/rpc/src/impls/net.rs +++ b/rpc/src/impls/net.rs @@ -2,9 +2,11 @@ use jsonrpc_core::*; use traits::Net; +/// Net rpc implementation. pub struct NetClient; impl NetClient { + /// Creates new NetClient. pub fn new() -> Self { NetClient } } diff --git a/rpc/src/impls/web3.rs b/rpc/src/impls/web3.rs index 50eb9c6f5..0188aa179 100644 --- a/rpc/src/impls/web3.rs +++ b/rpc/src/impls/web3.rs @@ -1,9 +1,12 @@ +//! Web3 rpc implementation. use jsonrpc_core::*; use traits::Web3; +/// Web3 rpc implementation. pub struct Web3Client; impl Web3Client { + /// Creates new Web3Client. pub fn new() -> Self { Web3Client } } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 43a24a1fb..816eeb4a4 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -1,6 +1,8 @@ +//! Ethcore rpc. +#![warn(missing_docs)] #![feature(custom_derive, custom_attribute, plugin)] -#![feature(slice_patterns)] #![plugin(serde_macros)] +#![plugin(clippy)] extern crate serde; extern crate serde_json; @@ -22,12 +24,14 @@ mod types; pub use self::traits::{Web3, Eth, EthFilter, Net}; pub use self::impls::*; +/// Http server. pub struct HttpServer { handler: IoHandler, threads: usize } impl HttpServer { + /// Construct new http server object with given number of threads. pub fn new(threads: usize) -> HttpServer { HttpServer { handler: IoHandler::new(), @@ -35,10 +39,12 @@ impl HttpServer { } } + /// Add io delegate. pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { self.handler.add_delegate(delegate); } + /// Start server asynchronously in new thread pub fn start_async(self, addr: &str) { let server = jsonrpc_http_server::Server::new(self.handler, self.threads); server.start_async(addr) diff --git a/rpc/src/traits/eth.rs b/rpc/src/traits/eth.rs index 63aadbc74..25756a713 100644 --- a/rpc/src/traits/eth.rs +++ b/rpc/src/traits/eth.rs @@ -44,6 +44,7 @@ pub trait Eth: Sized + Send + Sync + 'static { } } +/// Eth filters rpc api (polling). 
// TODO: do filters api properly pub trait EthFilter: Sized + Send + Sync + 'static { /// Returns id of new block filter diff --git a/util/Cargo.toml b/util/Cargo.toml index 362db33b2..d0e2e0ab7 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -26,7 +26,7 @@ crossbeam = "0.2" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } serde = "0.6.7" -clippy = "*" # Always newest, since we use nightly +clippy = "0.0.37" [dev-dependencies] json-tests = { path = "json-tests" } From 856c348e3eb39879761207bae2412e8b56cdacd2 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 17:14:41 +0100 Subject: [PATCH 128/138] moved old rpc implementation to v1/ dir --- bin/src/main.rs | 2 +- rpc/src/lib.rs | 7 +------ rpc/src/{ => v1}/impls/eth.rs | 4 ++-- rpc/src/{ => v1}/impls/mod.rs | 0 rpc/src/{ => v1}/impls/net.rs | 2 +- rpc/src/{ => v1}/impls/web3.rs | 2 +- rpc/src/v1/mod.rs | 10 ++++++++++ rpc/src/{ => v1}/traits/eth.rs | 0 rpc/src/{ => v1}/traits/mod.rs | 0 rpc/src/{ => v1}/traits/net.rs | 0 rpc/src/{ => v1}/traits/web3.rs | 0 rpc/src/{ => v1}/types/block.rs | 0 rpc/src/{ => v1}/types/mod.rs | 0 13 files changed, 16 insertions(+), 11 deletions(-) rename rpc/src/{ => v1}/impls/eth.rs (98%) rename rpc/src/{ => v1}/impls/mod.rs (100%) rename rpc/src/{ => v1}/impls/net.rs (94%) rename rpc/src/{ => v1}/impls/web3.rs (95%) create mode 100644 rpc/src/v1/mod.rs rename rpc/src/{ => v1}/traits/eth.rs (100%) rename rpc/src/{ => v1}/traits/mod.rs (100%) rename rpc/src/{ => v1}/traits/net.rs (100%) rename rpc/src/{ => v1}/traits/web3.rs (100%) rename rpc/src/{ => v1}/types/block.rs (100%) rename rpc/src/{ => v1}/types/mod.rs (100%) diff --git a/bin/src/main.rs b/bin/src/main.rs index 402a0c7c1..712635652 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -50,7 +50,7 @@ fn setup_log(init: &String) { #[cfg(feature = "rpc")] fn setup_rpc_server(client: Arc) { - use rpc::*; + use rpc::v1::*; let mut server = HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 816eeb4a4..7bc1d6987 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -17,12 +17,7 @@ macro_rules! rpcerr { () => (Err(Error::internal_error())) } -pub mod traits; -mod impls; -mod types; - -pub use self::traits::{Web3, Eth, EthFilter, Net}; -pub use self::impls::*; +pub mod v1; /// Http server. pub struct HttpServer { diff --git a/rpc/src/impls/eth.rs b/rpc/src/v1/impls/eth.rs similarity index 98% rename from rpc/src/impls/eth.rs rename to rpc/src/v1/impls/eth.rs index 91c05541a..46718601b 100644 --- a/rpc/src/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -6,8 +6,8 @@ use util::uint::*; use util::sha3::*; use ethcore::client::*; use ethcore::views::*; -use traits::{Eth, EthFilter}; -use types::Block; +use v1::traits::{Eth, EthFilter}; +use v1::types::Block; /// Eth rpc implementation. pub struct EthClient { diff --git a/rpc/src/impls/mod.rs b/rpc/src/v1/impls/mod.rs similarity index 100% rename from rpc/src/impls/mod.rs rename to rpc/src/v1/impls/mod.rs diff --git a/rpc/src/impls/net.rs b/rpc/src/v1/impls/net.rs similarity index 94% rename from rpc/src/impls/net.rs rename to rpc/src/v1/impls/net.rs index 20ed4d077..7bf9cb248 100644 --- a/rpc/src/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -1,6 +1,6 @@ //! Net rpc implementation. use jsonrpc_core::*; -use traits::Net; +use v1::traits::Net; /// Net rpc implementation. 
pub struct NetClient; diff --git a/rpc/src/impls/web3.rs b/rpc/src/v1/impls/web3.rs similarity index 95% rename from rpc/src/impls/web3.rs rename to rpc/src/v1/impls/web3.rs index 0188aa179..5117ebf16 100644 --- a/rpc/src/impls/web3.rs +++ b/rpc/src/v1/impls/web3.rs @@ -1,6 +1,6 @@ //! Web3 rpc implementation. use jsonrpc_core::*; -use traits::Web3; +use v1::traits::Web3; /// Web3 rpc implementation. pub struct Web3Client; diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs new file mode 100644 index 000000000..a7da1a441 --- /dev/null +++ b/rpc/src/v1/mod.rs @@ -0,0 +1,10 @@ +//! Ethcore rpc v1. +//! +//! Compliant with ethereum rpc. + +pub mod traits; +mod impls; +mod types; + +pub use self::traits::{Web3, Eth, EthFilter, Net}; +pub use self::impls::*; diff --git a/rpc/src/traits/eth.rs b/rpc/src/v1/traits/eth.rs similarity index 100% rename from rpc/src/traits/eth.rs rename to rpc/src/v1/traits/eth.rs diff --git a/rpc/src/traits/mod.rs b/rpc/src/v1/traits/mod.rs similarity index 100% rename from rpc/src/traits/mod.rs rename to rpc/src/v1/traits/mod.rs diff --git a/rpc/src/traits/net.rs b/rpc/src/v1/traits/net.rs similarity index 100% rename from rpc/src/traits/net.rs rename to rpc/src/v1/traits/net.rs diff --git a/rpc/src/traits/web3.rs b/rpc/src/v1/traits/web3.rs similarity index 100% rename from rpc/src/traits/web3.rs rename to rpc/src/v1/traits/web3.rs diff --git a/rpc/src/types/block.rs b/rpc/src/v1/types/block.rs similarity index 100% rename from rpc/src/types/block.rs rename to rpc/src/v1/types/block.rs diff --git a/rpc/src/types/mod.rs b/rpc/src/v1/types/mod.rs similarity index 100% rename from rpc/src/types/mod.rs rename to rpc/src/v1/types/mod.rs From b93bf662b976de476d10b6061a43f5968f692dce Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 17:18:38 +0100 Subject: [PATCH 129/138] added Clippy to client executable, added missing docs --- bin/Cargo.toml | 1 + bin/src/main.rs | 8 ++++++-- rpc/Cargo.toml | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bin/Cargo.toml b/bin/Cargo.toml index ba258b586..7174ada14 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -15,6 +15,7 @@ ctrlc = "1.0" ethcore-util = { path = "../util" } ethcore-rpc = { path = "../rpc", optional = true } ethcore = { path = ".." } +clippy = "0.0.37" [features] rpc = ["ethcore-rpc"] diff --git a/bin/src/main.rs b/bin/src/main.rs index 712635652..190bab311 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -1,5 +1,9 @@ +//! Ethcore client application. + +#![warn(missing_docs)] #![feature(plugin)] #![plugin(docopt_macros)] +#![plugin(clippy)] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -34,7 +38,7 @@ Options: -h --help Show this screen. 
"); -fn setup_log(init: &String) { +fn setup_log(init: &str) { let mut builder = LogBuilder::new(); builder.filter(None, LogLevelFilter::Info); @@ -52,7 +56,7 @@ fn setup_log(init: &String) { fn setup_rpc_server(client: Arc) { use rpc::v1::*; - let mut server = HttpServer::new(1); + let mut server = rpc::HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); server.add_delegate(EthClient::new(client.clone()).to_delegate()); server.add_delegate(EthFilterClient::new(client).to_delegate()); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index ab7072c85..ee1c97f5f 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -3,7 +3,7 @@ description = "Ethcore jsonrpc" name = "ethcore-rpc" version = "0.1.0" license = "GPL-3.0" -authors = ["Marek Kotewicz Date: Wed, 27 Jan 2016 17:24:11 +0100 Subject: [PATCH 130/138] Add homestead & random tests. --- src/tests/chain.rs | 3 ++ src/tests/homestead_chain.rs | 67 ++++++++++++++++++++++++++++++++++++ src/tests/mod.rs | 1 + 3 files changed, 71 insertions(+) create mode 100644 src/tests/homestead_chain.rs diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 42b4ee78a..613d9f6ff 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -67,3 +67,6 @@ declare_test!{BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHe declare_test!{BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} declare_test!{BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} declare_test!{BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} + +declare_test!{BlockchainTests_RandomTests_bl10251623GO, "BlockchainTests/RandomTests/bl10251623GO"} +declare_test!{BlockchainTests_RandomTests_bl201507071825GO, "BlockchainTests/RandomTests/bl201507071825GO"} diff --git a/src/tests/homestead_chain.rs b/src/tests/homestead_chain.rs new file mode 100644 index 000000000..5d090aefb --- /dev/null +++ b/src/tests/homestead_chain.rs @@ -0,0 +1,67 @@ +use std::env; +use super::test_common::*; +use client::{BlockChainClient,Client}; +use pod_state::*; +use block::Block; +use ethereum; + +fn do_json_test(json_data: &[u8]) -> Vec { + let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid"); + let mut failed = Vec::new(); + + for (name, test) in json.as_object().unwrap() { + let mut fail = false; + { + let mut fail_unless = |cond: bool| if !cond && !fail { + failed.push(name.clone()); + flush(format!("FAIL\n")); + fail = true; + true + } else {false}; + + flush(format!(" - {}...", name)); + + let blocks: Vec<(Bytes, bool)> = test["blocks"].as_array().unwrap().iter().map(|e| (xjson!(&e["rlp"]), e.find("blockHeader").is_some())).collect(); + let mut spec = ethereum::new_homestead_test(); + let s = PodState::from_json(test.find("pre").unwrap()); + spec.set_genesis_state(s); + spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); + assert!(spec.is_state_root_valid()); + + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + { + let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); + for (b, is_valid) in blocks.into_iter() { + if Block::is_good(&b) { + let _ = client.import_block(b.clone()); + } + client.flush_queue(); + let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; + assert_eq!(imported_ok, is_valid); + } + fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); + } + fs::remove_dir_all(&dir).unwrap(); + } + if !fail { + flush(format!("ok\n")); + } + } + println!("!!! 
{:?} tests from failed.", failed.len()); + failed +} + +declare_test!{BlockchainTests_Homestead_bcBlockGasLimitTest, "BlockchainTests/Homestead/bcBlockGasLimitTest"} +declare_test!{BlockchainTests_Homestead_bcForkStressTest, "BlockchainTests/Homestead/bcForkStressTest"} +declare_test!{BlockchainTests_Homestead_bcGasPricerTest, "BlockchainTests/Homestead/bcGasPricerTest"} +declare_test!{BlockchainTests_Homestead_bcInvalidHeaderTest, "BlockchainTests/Homestead/bcInvalidHeaderTest"} +declare_test!{BlockchainTests_Homestead_bcInvalidRLPTest, "BlockchainTests/Homestead/bcInvalidRLPTest"} +declare_test!{BlockchainTests_Homestead_bcMultiChainTest, "BlockchainTests/Homestead/bcMultiChainTest"} +declare_test!{BlockchainTests_Homestead_bcRPC_API_Test, "BlockchainTests/Homestead/bcRPC_API_Test"} +declare_test!{BlockchainTests_Homestead_bcStateTest, "BlockchainTests/Homestead/bcStateTest"} +declare_test!{BlockchainTests_Homestead_bcTotalDifficultyTest, "BlockchainTests/Homestead/bcTotalDifficultyTest"} +declare_test!{BlockchainTests_Homestead_bcUncleHeaderValiditiy, "BlockchainTests/Homestead/bcUncleHeaderValiditiy"} +declare_test!{BlockchainTests_Homestead_bcUncleTest, "BlockchainTests/Homestead/bcUncleTest"} +declare_test!{BlockchainTests_Homestead_bcValidBlockTest, "BlockchainTests/Homestead/bcValidBlockTest"} +declare_test!{BlockchainTests_Homestead_bcWalletTest, "BlockchainTests/Homestead/bcWalletTest"} diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 17da72d77..73dc6ef9a 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -6,4 +6,5 @@ mod executive; mod state; mod client; mod chain; +mod homestead_chain; mod helpers; \ No newline at end of file From 382b22b93b997d944ea91a4fd328abcdee502b47 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Wed, 27 Jan 2016 17:25:19 +0100 Subject: [PATCH 131/138] Fixing suicide with self-refund to be consistent with CPP. --- src/externalities.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/externalities.rs b/src/externalities.rs index f1b8c1958..6a874330c 100644 --- a/src/externalities.rs +++ b/src/externalities.rs @@ -215,8 +215,13 @@ impl<'a> Ext for Externalities<'a> { fn suicide(&mut self, refund_address: &Address) { let address = self.origin_info.address.clone(); let balance = self.balance(&address); - trace!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); - self.state.transfer_balance(&address, refund_address, &balance); + if &address == refund_address { + // TODO [todr] To be consisted with CPP client we set balance to 0 in that case. + self.state.sub_balance(&address, &balance); + } else { + trace!("Suiciding {} -> {} (xfer: {})", address, refund_address, balance); + self.state.transfer_balance(&address, refund_address, &balance); + } self.substate.suicides.insert(address); } From dec9c3a642a334f9396e8632b063515802c473a5 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Wed, 27 Jan 2016 17:32:12 +0100 Subject: [PATCH 132/138] Remove code duplication. 
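The change below collapses the duplicated Frontier and Homestead test harnesses into a single json_chain_test driven by a ChainEra enum, so each era-specific module shrinks to a thin wrapper. A minimal sketch of the resulting shape, mirroring the diff that follows (parameter and return types are assumed to match the existing harness):

    use super::chain::{ChainEra, json_chain_test};

    // Homestead runner: same block-import harness, with the spec picked by era.
    fn do_json_test(json_data: &[u8]) -> Vec<String> {
        json_chain_test(json_data, ChainEra::Homestead)
    }

Inside the shared harness the era only selects the chain spec:

    let mut spec = match era {
        ChainEra::Frontier => ethereum::new_frontier_test(),
        ChainEra::Homestead => ethereum::new_homestead_test(),
    };
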
--- src/tests/chain.rs | 16 +++++++++-- src/tests/homestead_chain.rs | 51 ++---------------------------------- 2 files changed, 16 insertions(+), 51 deletions(-) diff --git a/src/tests/chain.rs b/src/tests/chain.rs index 613d9f6ff..ca7884315 100644 --- a/src/tests/chain.rs +++ b/src/tests/chain.rs @@ -5,7 +5,12 @@ use pod_state::*; use block::Block; use ethereum; -fn do_json_test(json_data: &[u8]) -> Vec { +pub enum ChainEra { + Frontier, + Homestead, +} + +pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec { let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid"); let mut failed = Vec::new(); @@ -22,7 +27,10 @@ fn do_json_test(json_data: &[u8]) -> Vec { flush(format!(" - {}...", name)); let blocks: Vec<(Bytes, bool)> = test["blocks"].as_array().unwrap().iter().map(|e| (xjson!(&e["rlp"]), e.find("blockHeader").is_some())).collect(); - let mut spec = ethereum::new_frontier_like_test(); + let mut spec = match era { + ChainEra::Frontier => ethereum::new_frontier_test(), + ChainEra::Homestead => ethereum::new_homestead_test(), + }; let s = PodState::from_json(test.find("pre").unwrap()); spec.set_genesis_state(s); spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); @@ -52,6 +60,10 @@ fn do_json_test(json_data: &[u8]) -> Vec { failed } +fn do_json_test(json_data: &[u8]) -> Vec { + json_chain_test(json_data, ChainEra::Frontier) +} + declare_test!{BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"} declare_test!{BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} diff --git a/src/tests/homestead_chain.rs b/src/tests/homestead_chain.rs index 5d090aefb..a9f544d8f 100644 --- a/src/tests/homestead_chain.rs +++ b/src/tests/homestead_chain.rs @@ -1,55 +1,8 @@ -use std::env; use super::test_common::*; -use client::{BlockChainClient,Client}; -use pod_state::*; -use block::Block; -use ethereum; +use super::chain::{ChainEra, json_chain_test}; fn do_json_test(json_data: &[u8]) -> Vec { - let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid"); - let mut failed = Vec::new(); - - for (name, test) in json.as_object().unwrap() { - let mut fail = false; - { - let mut fail_unless = |cond: bool| if !cond && !fail { - failed.push(name.clone()); - flush(format!("FAIL\n")); - fail = true; - true - } else {false}; - - flush(format!(" - {}...", name)); - - let blocks: Vec<(Bytes, bool)> = test["blocks"].as_array().unwrap().iter().map(|e| (xjson!(&e["rlp"]), e.find("blockHeader").is_some())).collect(); - let mut spec = ethereum::new_homestead_test(); - let s = PodState::from_json(test.find("pre").unwrap()); - spec.set_genesis_state(s); - spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap()); - assert!(spec.is_state_root_valid()); - - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - { - let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap(); - for (b, is_valid) in blocks.into_iter() { - if Block::is_good(&b) { - let _ = client.import_block(b.clone()); - } - client.flush_queue(); - let imported_ok = client.import_verified_blocks(&IoChannel::disconnected()) > 0; - assert_eq!(imported_ok, is_valid); - } - fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"])); - } - fs::remove_dir_all(&dir).unwrap(); - } - if !fail { - flush(format!("ok\n")); - } - } - println!("!!! 
{:?} tests from failed.", failed.len()); - failed + json_chain_test(json_data, ChainEra::Homestead) } declare_test!{BlockchainTests_Homestead_bcBlockGasLimitTest, "BlockchainTests/Homestead/bcBlockGasLimitTest"} From 09b9001c65279c0449635f17ecd65e5b2322ce34 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 18:17:20 +0100 Subject: [PATCH 133/138] stub for rpc eth methods --- rpc/src/lib.rs | 4 -- rpc/src/v1/traits/eth.rs | 132 +++++++++++++++++++++++++++++++------- rpc/src/v1/traits/mod.rs | 5 ++ rpc/src/v1/traits/net.rs | 4 +- rpc/src/v1/traits/web3.rs | 2 +- 5 files changed, 116 insertions(+), 31 deletions(-) diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 7bc1d6987..a27afb5f6 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -13,10 +13,6 @@ extern crate ethcore; use self::jsonrpc_core::{IoHandler, IoDelegate}; -macro_rules! rpcerr { - () => (Err(Error::internal_error())) -} - pub mod v1; /// Http server. diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 25756a713..1586e7069 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -5,41 +5,125 @@ use jsonrpc_core::*; /// Eth rpc interface. pub trait Eth: Sized + Send + Sync + 'static { /// Returns protocol version. - fn protocol_version(&self, _: Params) -> Result { rpcerr!() } - - /// Returns block author. - fn author(&self, _: Params) -> Result { rpcerr!() } - - /// Returns current gas_price. - fn gas_price(&self, _: Params) -> Result { rpcerr!() } - - /// Returns highest block number. - fn block_number(&self, _: Params) -> Result { rpcerr!() } - - /// Returns block with given index / hash. - fn block(&self, _: Params) -> Result { rpcerr!() } - - /// Returns true if client is actively mining new blocks. - fn is_mining(&self, _: Params) -> Result { rpcerr!() } + fn protocol_version(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns the number of hashes per second that the node is mining with. - fn hashrate(&self, _: Params) -> Result { rpcerr!() } + fn hashrate(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns block author. + fn author(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns true if client is actively mining new blocks. + fn is_mining(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns current gas_price. + fn gas_price(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns accounts list. + fn accounts(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns highest block number. + fn block_number(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns balance of the given account. + fn balance(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns content of the storage at given address. + fn storage_at(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns block with given index / hash. + fn block(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of transactions sent from given address at given time (block number). + fn transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns the number of transactions in a block. - fn block_transaction_count(&self, _: Params) -> Result { rpcerr!() } + fn block_transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of uncles in a given block. + fn block_uncles_count(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the code at given address at given time (block number). 
+ fn code_at(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Sends transaction. + fn send_transaction(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Call contract. + fn call(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Estimate gas needed for execution of given contract. + fn estimate_gas(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns transaction at given block and index. + fn transaction_at(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns transaction receipt. + fn transaction_receipt(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns an uncles at given block and index. + fn uncle_at(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns available compilers. + fn compilers(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Compiles lll code. + fn compile_lll(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Compiles solidity. + fn compile_solidity(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Compiles serpent. + fn compile_serpent(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns logs matching given filter object. + fn logs(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. + fn work(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Used for submitting a proof-of-work solution. + fn submit_work(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Used for submitting mining hashrate. + fn submit_hashrate(&self, _: Params) -> Result { rpc_unimplemented!() } /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); delegate.add_method("eth_protocolVersion", Eth::protocol_version); + delegate.add_method("eth_hashrate", Eth::hashrate); delegate.add_method("eth_coinbase", Eth::author); + delegate.add_method("eth_mining", Eth::is_mining); delegate.add_method("eth_gasPrice", Eth::gas_price); + delegate.add_method("eth_accounts", Eth::accounts); delegate.add_method("eth_blockNumber", Eth::block_number); + delegate.add_method("eth_balance", Eth::balance); + delegate.add_method("eth_getStorageAt", Eth::storage_at); + delegate.add_method("eth_getTransactionCount", Eth::transaction_count); + delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); + delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); + delegate.add_method("eth_code", Eth::code_at); + delegate.add_method("eth_sendTransaction", Eth::send_transaction); + delegate.add_method("eth_call", Eth::call); + delegate.add_method("eth_estimateGas", Eth::estimate_gas); delegate.add_method("eth_getBlockByHash", Eth::block); delegate.add_method("eth_getBlockByNumber", Eth::block); - delegate.add_method("eth_mining", Eth::is_mining); - delegate.add_method("eth_hashrate", Eth::hashrate); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_at); + delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_at); + delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt); + 
delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_at); + delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_at); + delegate.add_method("eth_getCompilers", Eth::compilers); + delegate.add_method("eth_compileLLL", Eth::compile_lll); + delegate.add_method("eth_compileSolidity", Eth::compile_solidity); + delegate.add_method("eth_compileSerpent", Eth::compile_serpent); + delegate.add_method("eth_getLogs", Eth::logs); + delegate.add_method("eth_getWork", Eth::work); + delegate.add_method("eth_submitWork", Eth::submit_work); + delegate.add_method("eth_submitHashrate", Eth::submit_hashrate); delegate } } @@ -48,13 +132,13 @@ pub trait Eth: Sized + Send + Sync + 'static { // TODO: do filters api properly pub trait EthFilter: Sized + Send + Sync + 'static { /// Returns id of new block filter - fn new_block_filter(&self, _: Params) -> Result { rpcerr!() } + fn new_block_filter(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns id of new block filter - fn new_pending_transaction_filter(&self, _: Params) -> Result { rpcerr!() } + fn new_pending_transaction_filter(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns filter changes since last poll - fn filter_changes(&self, _: Params) -> Result { rpcerr!() } + fn filter_changes(&self, _: Params) -> Result { rpc_unimplemented!() } /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { diff --git a/rpc/src/v1/traits/mod.rs b/rpc/src/v1/traits/mod.rs index 2fa52d538..70c825175 100644 --- a/rpc/src/v1/traits/mod.rs +++ b/rpc/src/v1/traits/mod.rs @@ -1,4 +1,9 @@ //! Ethereum rpc interfaces. + +macro_rules! rpc_unimplemented { + () => (Err(Error::internal_error())) +} + pub mod web3; pub mod eth; pub mod net; diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index 4df8d7114..9196503d2 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -5,10 +5,10 @@ use jsonrpc_core::*; /// Net rpc interface. pub trait Net: Sized + Send + Sync + 'static { /// Returns protocol version. - fn version(&self, _: Params) -> Result { rpcerr!() } + fn version(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns number of peers connected to node. - fn peer_count(&self, _: Params) -> Result { rpcerr!() } + fn peer_count(&self, _: Params) -> Result { rpc_unimplemented!() } /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { diff --git a/rpc/src/v1/traits/web3.rs b/rpc/src/v1/traits/web3.rs index 8e73d4304..118316155 100644 --- a/rpc/src/v1/traits/web3.rs +++ b/rpc/src/v1/traits/web3.rs @@ -5,7 +5,7 @@ use jsonrpc_core::*; /// Web3 rpc interface. pub trait Web3: Sized + Send + Sync + 'static { /// Returns current client version. - fn client_version(&self, _: Params) -> Result { rpcerr!() } + fn client_version(&self, _: Params) -> Result { rpc_unimplemented!() } /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { From b13d68c7e99319420b0ee500308635442c8594d2 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 19:00:42 +0100 Subject: [PATCH 134/138] missing eth filter interface --- rpc/src/v1/traits/eth.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 1586e7069..3dcdfdf05 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -131,21 +131,33 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Eth filters rpc api (polling). 
// TODO: do filters api properly pub trait EthFilter: Sized + Send + Sync + 'static { - /// Returns id of new block filter + /// Returns id of new filter. + fn new_filter(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns id of new block filter. fn new_block_filter(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns id of new block filter + /// Returns id of new block filter. fn new_pending_transaction_filter(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns filter changes since last poll + /// Returns filter changes since last poll. fn filter_changes(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns filter logs. + fn filter_logs(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Uninstalls filter. + fn uninstall_filter(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); + delegate.add_method("eth_newFilter", EthFilter::new_filter); delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes); + delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs); + delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter); delegate } } From f8568e10b7fe4ffbc525188ca360cd8e09fd55e8 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 27 Jan 2016 19:03:12 +0100 Subject: [PATCH 135/138] missing net_ methods --- rpc/src/v1/traits/net.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index 9196503d2..84877cab7 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -10,11 +10,16 @@ pub trait Net: Sized + Send + Sync + 'static { /// Returns number of peers connected to node. fn peer_count(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns true if client is actively listening for network connections. + /// Otherwise false. + fn is_listening(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); delegate.add_method("net_version", Net::version); delegate.add_method("net_peerCount", Net::peer_count); + delegate.add_method("net_listening", Net::is_listening); delegate } } From 07d6965ce70f9845ed8b577ff7c1dd98640442c9 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 28 Jan 2016 10:54:49 +0100 Subject: [PATCH 136/138] rpc net methods returns real peer count && protocol version --- bin/src/main.rs | 8 ++++---- rpc/src/v1/impls/net.rs | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/bin/src/main.rs b/bin/src/main.rs index 190bab311..3d4199fcf 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -53,19 +53,19 @@ fn setup_log(init: &str) { #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc) { +fn setup_rpc_server(client: Arc, sync: Arc) { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); server.add_delegate(Web3Client::new().to_delegate()); server.add_delegate(EthClient::new(client.clone()).to_delegate()); server.add_delegate(EthFilterClient::new(client).to_delegate()); - server.add_delegate(NetClient::new().to_delegate()); + server.add_delegate(NetClient::new(sync).to_delegate()); server.start_async("127.0.0.1:3030"); } #[cfg(not(feature = "rpc"))] -fn setup_rpc_server(_client: Arc) { +fn setup_rpc_server(_client: Arc, _sync: Arc) { } fn main() { @@ -81,7 +81,7 @@ fn main() { let mut net_settings = NetworkConfiguration::new(); net_settings.boot_nodes = init_nodes; let mut service = ClientService::start(spec, net_settings).unwrap(); - setup_rpc_server(service.client()); + setup_rpc_server(service.client(), service.sync()); let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: service.sync() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 7bf9cb248..dff351c33 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -1,21 +1,29 @@ //! Net rpc implementation. +use std::sync::Arc; use jsonrpc_core::*; +use ethcore::sync::EthSync; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient; +pub struct NetClient { + sync: Arc +} impl NetClient { /// Creates new NetClient. 
- pub fn new() -> Self { NetClient } + pub fn new(sync: Arc) -> Self { + NetClient { + sync: sync + } + } } impl Net for NetClient { fn version(&self, _: Params) -> Result { - Ok(Value::U64(63)) + Ok(Value::U64(self.sync.status().protocol_version as u64)) } fn peer_count(&self, _params: Params) -> Result { - Ok(Value::U64(0)) + Ok(Value::U64(self.sync.status().num_peers as u64)) } } From 87edeae078593f9c8511fb0a3ece9e48f0cedd44 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 28 Jan 2016 11:21:06 +0100 Subject: [PATCH 137/138] fixed tests submodule branch --- .gitmodules | 1 + res/ethereum/tests | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 84843f000..4174eb2e1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,4 @@ [submodule "res/ethereum/tests"] path = res/ethereum/tests url = git@github.com:ethereum/tests + branch = develop diff --git a/res/ethereum/tests b/res/ethereum/tests index dc86e6359..c670b1d8c 160000 --- a/res/ethereum/tests +++ b/res/ethereum/tests @@ -1 +1 @@ -Subproject commit dc86e6359675440aea59ddb48648a01c799925d8 +Subproject commit c670b1d8c9f09593a6758ab2c099360e16c7c25b From c2134993a5266848ba3fb4089d78c567d0dbe050 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 28 Jan 2016 18:37:14 +0100 Subject: [PATCH 138/138] fix submodule version --- res/ethereum/tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/res/ethereum/tests b/res/ethereum/tests index e838fd909..c670b1d8c 160000 --- a/res/ethereum/tests +++ b/res/ethereum/tests @@ -1 +1 @@ -Subproject commit e838fd90998fc5502d0b7c9427a4c231f9a6953d +Subproject commit c670b1d8c9f09593a6758ab2c099360e16c7c25b
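
Taken together, the patches above leave the v1 RPC wiring roughly as sketched here. This is an illustrative summary rather than part of any commit: it assembles the pieces shown in bin/src/main.rs and rpc/src/v1, and assumes the Arc payloads are the Client and EthSync types used by the implementations.

    extern crate ethcore;
    extern crate ethcore_rpc as rpc;

    use std::sync::Arc;
    use ethcore::client::Client;
    use ethcore::sync::EthSync;
    use rpc::v1::*;

    // Start the v1 JSON-RPC server with one delegate per namespace (web3_, eth_, net_).
    fn start_rpc(client: Arc<Client>, sync: Arc<EthSync>) {
        let mut server = rpc::HttpServer::new(1);
        server.add_delegate(Web3Client::new().to_delegate());
        server.add_delegate(EthClient::new(client.clone()).to_delegate());
        server.add_delegate(EthFilterClient::new(client).to_delegate());
        // NetClient answers net_version and net_peerCount from EthSync::status().
        server.add_delegate(NetClient::new(sync).to_delegate());
        server.start_async("127.0.0.1:3030");
    }

Any trait method a concrete client does not override falls back to the rpc_unimplemented!() default, which returns a JSON-RPC internal error, so partially implemented namespaces can still be registered safely.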