From 193d615f9acd80fb674866491442a558af9f6196 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 12:41:31 +0100 Subject: [PATCH 01/28] First JournalDB implementation. --- util/src/error.rs | 7 +++ util/src/journaldb.rs | 123 ++++++++++++++++++++++++++++++++++++++++++ util/src/lib.rs | 1 + util/src/overlaydb.rs | 15 +++--- 4 files changed, 139 insertions(+), 7 deletions(-) create mode 100644 util/src/journaldb.rs diff --git a/util/src/error.rs b/util/src/error.rs index 04f7b96ce..d9687183d 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -22,6 +22,7 @@ pub enum UtilError { BaseData(BaseDataError), Network(NetworkError), Decoder(DecoderError), + SimpleString(String), BadSize, } @@ -73,6 +74,12 @@ impl From<::rlp::DecoderError> for UtilError { } } +impl From for UtilError { + fn from(err: String) -> UtilError { + UtilError::SimpleString(err) + } +} + // TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted. /*#![feature(concat_idents)] macro_rules! assimilate { diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs new file mode 100644 index 000000000..2dda47eba --- /dev/null +++ b/util/src/journaldb.rs @@ -0,0 +1,123 @@ +//! Disk-backed HashDB implementation. + +use std::env; +use common::*; +use rlp::*; +use hashdb::*; +use overlaydb::*; +use rocksdb::{DB, Writable}; + +#[derive(Clone)] +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +pub struct JournalDB { + forward: OverlayDB, + backing: Arc, + inserts: Vec, + removes: Vec, +} + +impl JournalDB { + /// Create a new instance given a `backing` database. + pub fn new(backing: DB) -> JournalDB { + let db = Arc::new(backing); + // TODO: check it doesn't overwrite anything before. + // TODO: proper handling of errors (return ) + JournalDB { + forward: OverlayDB::new_with_arc(db.clone()), + backing: db, + inserts: vec![], + removes: vec![], + } + } + + /// Create a new instance with an anonymous temporary database. + pub fn new_temp() -> JournalDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(DB::open_default(dir.to_str().unwrap()).unwrap()) + } + + /// Get a clone of the overlay db portion of this. + pub fn to_overlaydb(&self) -> OverlayDB { self.forward.clone() } + + /// Commit all recent insert operations and historical removals from the old era + /// to the backing database. + pub fn commit(&mut self, now: u64, id: &H256, end_era: u64, canon_id: &H256) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store last_era, reclaim_period. + + // when we make a new commit, we journal the inserts and removes. + // for each end_era that we journaled that we are no passing by, + // we remove all of its removes assuming it is canonical and all + // of its inserts otherwise. + + // record new commit's details. 
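// A concrete picture of the layout (sketch): if this is the third commit
// journalled for era 3, the key is the RLP of the two-item list [3, 2]
// (era, then index) and the value stored under it is the RLP of
// [id, inserts, removes]. When era 3 is later passed in as end_era, the
// keys [3, 0], [3, 1], [3, 2], ... are read back until one is missing;
// the entry whose id equals canon_id has its removes applied to the
// forward overlay, while every other (non-canonical) entry has its
// inserts applied as removals instead.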
+ { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&now); + r.append(&index); + last = r.out(); + &last + })).is_some() { + index += 1; + } + + let mut r = RlpStream::new_list(3); + r.append(id); + r.append(&self.inserts); + r.append(&self.removes); + try!(self.backing.put(&last, &r.out())); + } + + // apply old commits' details + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&end_era); + r.append(&index); + last = r.out(); + &last + })) { + let rlp = Rlp::new(&rlp_data); + let to_remove: Vec = rlp.val_at(if *canon_id == rlp.val_at(0) {2} else {1}); + for i in to_remove.iter() { + self.forward.remove(i); + } + try!(self.backing.delete(&last)); + index += 1; + } + + self.inserts.clear(); + self.removes.clear(); + + self.forward.commit() + } + + /// Revert all operations on this object (i.e. `insert()`s and `removes()`s) since the + /// last `commit()`. + pub fn revert(&mut self) { self.forward.revert(); self.removes.clear(); } +} + +impl HashDB for JournalDB { + fn keys(&self) -> HashMap { self.forward.keys() } + fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } + fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } + fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } + fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } + fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } +} diff --git a/util/src/lib.rs b/util/src/lib.rs index 4bc47e61c..34961f1de 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -69,6 +69,7 @@ pub mod sha3; pub mod hashdb; pub mod memorydb; pub mod overlaydb; +pub mod journaldb; pub mod math; pub mod chainfilter; pub mod crypto; diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 1006cd28c..6330f1e25 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -15,11 +15,11 @@ use rocksdb::{DB, Writable, IteratorMode}; #[derive(Clone)] /// Implementation of the HashDB trait for a disk-backed database with a memory overlay. /// -/// The operations `insert()` and `kill()` take place on the memory overlay; batches of +/// The operations `insert()` and `remove()` take place on the memory overlay; batches of /// such operations may be flushed to the disk-backed DB with `commit()` or discarded with /// `revert()`. /// -/// `lookup()` and `exists()` maintain normal behaviour - all `insert()` and `kill()` +/// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. pub struct OverlayDB { overlay: MemoryDB, @@ -28,8 +28,11 @@ pub struct OverlayDB { impl OverlayDB { /// Create a new instance of OverlayDB given a `backing` database. - pub fn new(backing: DB) -> OverlayDB { - OverlayDB{ overlay: MemoryDB::new(), backing: Arc::new(backing) } + pub fn new(backing: DB) -> OverlayDB { Self::new_with_arc(Arc::new(backing)) } + + /// Create a new instance of OverlayDB given a `backing` database. + pub fn new_with_arc(backing: Arc) -> OverlayDB { + OverlayDB{ overlay: MemoryDB::new(), backing: backing } } /// Create a new instance of OverlayDB with an anonymous temporary database. 
@@ -70,9 +73,7 @@ impl OverlayDB { let mut ret = 0u32; for i in self.overlay.drain().into_iter() { let (key, (value, rc)) = i; - // until we figure out state trie pruning, only commit stuff when it has a strictly positive delkta of RCs - - // this prevents RCs being reduced to 0 where the DB would pretent that the node had been removed. - if rc > 0 { + if rc != 0 { match self.payload(&key) { Some(x) => { let (back_value, back_rc) = x; From 28c07cba52cac3cc95c20b9339831b41dc94482d Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 13:30:01 +0100 Subject: [PATCH 02/28] JournalDB passing all tests. --- util/src/journaldb.rs | 132 +++++++++++++++++++++++++++++++++++------- 1 file changed, 111 insertions(+), 21 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 2dda47eba..011d46663 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -26,8 +26,6 @@ impl JournalDB { /// Create a new instance given a `backing` database. pub fn new(backing: DB) -> JournalDB { let db = Arc::new(backing); - // TODO: check it doesn't overwrite anything before. - // TODO: proper handling of errors (return ) JournalDB { forward: OverlayDB::new_with_arc(db.clone()), backing: db, @@ -48,7 +46,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - pub fn commit(&mut self, now: u64, id: &H256, end_era: u64, canon_id: &H256) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, &H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -81,30 +79,31 @@ impl JournalDB { r.append(&self.inserts); r.append(&self.removes); try!(self.backing.put(&last, &r.out())); + self.inserts.clear(); + self.removes.clear(); } // apply old commits' details - let mut index = 0usize; - let mut last; - while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(2); - r.append(&end_era); - r.append(&index); - last = r.out(); - &last - })) { - let rlp = Rlp::new(&rlp_data); - let to_remove: Vec = rlp.val_at(if *canon_id == rlp.val_at(0) {2} else {1}); - for i in to_remove.iter() { - self.forward.remove(i); + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&end_era); + r.append(&index); + last = r.out(); + &last + })) { + let rlp = Rlp::new(&rlp_data); + let to_remove: Vec = rlp.val_at(if *canon_id == rlp.val_at(0) {2} else {1}); + for i in to_remove.iter() { + self.forward.remove(i); + } + try!(self.backing.delete(&last)); + index += 1; } - try!(self.backing.delete(&last)); - index += 1; } - self.inserts.clear(); - self.removes.clear(); - self.forward.commit() } @@ -121,3 +120,94 @@ impl HashDB for JournalDB { fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } } + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + + #[test] + fn long_history() { + // history is 3 + let mut jdb = JournalDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, 
&b"3".sha3(), Some((0, &b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, &b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = JournalDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, &b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, &b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, &b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, &b"3".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = JournalDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, &b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, &b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, &b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } +} From b9b08af518e6e8a4a6a36b3a89695cf7c6208c28 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 13:54:46 +0100 Subject: [PATCH 03/28] Use JournalDB instead of OverlayDB. 
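JournalDB replaces OverlayDB throughout State, Block and Client, so every
state write is journalled against the block (era) that produced it and old
journal entries are pruned once they fall HISTORY blocks behind the head.
A minimal sketch of the caller side this patch introduces in src/client.rs
(`state_db` stands for the JournalDB drained from the just-closed block,
`chain` and `header` for the client's chain and the imported block's header):

    const HISTORY: u64 = 1000;
    let number = header.number();
    // journal this block's inserts/removes; once a block is HISTORY eras old,
    // apply the removals of whichever journalled branch proved canonical
    let ancient = if number >= HISTORY { Some(number - HISTORY) } else { None };
    try!(state_db.commit(number, &header.hash(),
        ancient.map(|n| (n, chain.block_hash(n).unwrap()))));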
--- src/account.rs | 10 +++++----- src/block.rs | 16 ++++++++-------- src/blockchain.rs | 2 +- src/client.rs | 14 +++++++++----- src/ethereum/ethash.rs | 4 ++-- src/ethereum/mod.rs | 2 +- src/spec.rs | 5 +++-- src/state.rs | 12 ++++++------ util/src/journaldb.rs | 4 ++-- util/src/lib.rs | 1 + util/src/overlaydb.rs | 13 +++++++++---- 11 files changed, 47 insertions(+), 36 deletions(-) diff --git a/src/account.rs b/src/account.rs index 8c36c7cbd..c64e80ed8 100644 --- a/src/account.rs +++ b/src/account.rs @@ -236,7 +236,7 @@ mod tests { #[test] fn storage_at() { - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); @@ -254,7 +254,7 @@ mod tests { #[test] fn note_code() { - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); @@ -273,7 +273,7 @@ mod tests { #[test] fn commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); a.set_storage(x!(0), x!(0x1234)); assert_eq!(a.storage_root(), None); a.commit_storage(&mut db); @@ -283,7 +283,7 @@ mod tests { #[test] fn commit_remove_commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); a.set_storage(x!(0), x!(0x1234)); a.commit_storage(&mut db); a.set_storage(x!(1), x!(0x1234)); @@ -296,7 +296,7 @@ mod tests { #[test] fn commit_code() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); a.init_code(vec![0x55, 0x44, 0xffu8]); assert_eq!(a.code_hash(), SHA3_EMPTY); a.commit_code(&mut db); diff --git a/src/block.rs b/src/block.rs index d149d6132..d47fccc26 100644 --- a/src/block.rs +++ b/src/block.rs @@ -103,7 +103,7 @@ pub struct SealedBlock { impl<'x, 'y> OpenBlock<'x, 'y> { /// Create a new OpenBlock ready for transaction pushing. - pub fn new<'a, 'b>(engine: &'a Engine, db: OverlayDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { + pub fn new<'a, 'b>(engine: &'a Engine, db: JournalDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { let mut r = OpenBlock { block: Block::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, @@ -241,7 +241,7 @@ impl<'x, 'y> ClosedBlock<'x, 'y> { pub fn reopen(self) -> OpenBlock<'x, 'y> { self.open_block } /// Drop this object and return the underlieing database. - pub fn drain(self) -> OverlayDB { self.open_block.block.state.drop().1 } + pub fn drain(self) -> JournalDB { self.open_block.block.state.drop().1 } } impl SealedBlock { @@ -256,7 +256,7 @@ impl SealedBlock { } /// Drop this object and return the underlieing database. 
- pub fn drain(self) -> OverlayDB { self.block.state.drop().1 } + pub fn drain(self) -> JournalDB { self.block.state.drop().1 } } impl IsBlock for SealedBlock { @@ -264,7 +264,7 @@ impl IsBlock for SealedBlock { } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: OverlayDB, parent: &Header, last_hashes: &'y LastHashes) -> Result, Error> { +pub fn enact<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result, Error> { { let header = BlockView::new(block_bytes).header_view(); let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); @@ -284,7 +284,7 @@ pub fn enact<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: OverlayDB, pare } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards -pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: OverlayDB, parent: &Header, last_hashes: &LastHashes) -> Result { +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: &LastHashes) -> Result { let header = BlockView::new(block_bytes).header_view(); Ok(try!(try!(enact(block_bytes, engine, db, parent, last_hashes)).seal(header.seal()))) } @@ -294,7 +294,7 @@ fn open_block() { use spec::*; let engine = Spec::new_test().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = OverlayDB::new_temp(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); @@ -308,13 +308,13 @@ fn enact_block() { let engine = Spec::new_test().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = OverlayDB::new_temp(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let b = OpenBlock::new(engine.deref(), db, &genesis_header, &vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); - let mut db = OverlayDB::new_temp(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, &vec![genesis_header.hash()]).unwrap(); diff --git a/src/blockchain.rs b/src/blockchain.rs index f08d15057..f19b958c5 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -251,7 +251,7 @@ impl BlockChain { /// Ensure that the best block does indeed have a state_root in the state DB. /// If it doesn't, then rewind down until we find one that does and delete data to ensure that /// later blocks will be reimported. - pub fn ensure_good(&mut self, _state: &OverlayDB) { + pub fn ensure_good(&mut self, _state: &JournalDB) { unimplemented!(); } diff --git a/src/client.rs b/src/client.rs index 3ee84ccd7..fe3eee242 100644 --- a/src/client.rs +++ b/src/client.rs @@ -104,10 +104,12 @@ pub trait BlockChainClient : Sync + Send { pub struct Client { chain: Arc>, engine: Arc>, - state_db: OverlayDB, + state_db: JournalDB, queue: BlockQueue, } +const HISTORY: u64 = 1000; + impl Client { /// Create a new client with given spec and DB path. 
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { @@ -135,11 +137,12 @@ impl Client { let mut state_path = path.to_path_buf(); state_path.push("state"); let db = DB::open(&opts, state_path.to_str().unwrap()).unwrap(); - let mut state_db = OverlayDB::new(db); + let mut state_db = JournalDB::new(db); let engine = Arc::new(try!(spec.to_engine())); - engine.spec().ensure_db_good(&mut state_db); - state_db.commit().expect("Error commiting genesis state to state DB"); + if engine.spec().ensure_db_good(&mut state_db) { + state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + } // chain.write().unwrap().ensure_good(&state_db); @@ -196,7 +199,8 @@ impl Client { } self.chain.write().unwrap().insert_block(&bytes); //TODO: err here? - match result.drain().commit() { + let ancient = if header.number() >= HISTORY { Some(header.number() - HISTORY) } else { None }; + match result.drain().commit(header.number(), &header.hash(), ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap()))) { Ok(_) => (), Err(e) => { warn!(target: "client", "State DB commit failed: {:?}", e); diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index 99ffc3186..a5a1175fe 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -211,7 +211,7 @@ fn on_close_block() { use super::*; let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = OverlayDB::new_temp(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); @@ -224,7 +224,7 @@ fn on_close_block_with_uncle() { use super::*; let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = OverlayDB::new_temp(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs index 603a64e7d..25a804db3 100644 --- a/src/ethereum/mod.rs +++ b/src/ethereum/mod.rs @@ -40,7 +40,7 @@ mod tests { fn ensure_db_good() { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = OverlayDB::new_temp(); + let mut db = MemoryDB::new_temp(); engine.spec().ensure_db_good(&mut db); let s = State::from_existing(db.clone(), genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); diff --git a/src/spec.rs b/src/spec.rs index b174b0e9f..e93b460c8 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -220,7 +220,7 @@ impl FromJson for Spec { impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. - pub fn ensure_db_good(&self, db: &mut HashDB) { + pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { if !db.contains(&self.state_root()) { info!("Populating genesis state..."); let mut root = H256::new(); @@ -232,7 +232,8 @@ impl Spec { } assert!(db.contains(&self.state_root())); info!("Genesis state is ready"); - } + true + } else { false } } /// Create a new Spec from a JSON UTF-8 data resource `data`. 
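Note on the spec.rs hunk above: `ensure_db_good` now reports whether it
actually wrote the genesis state, so the caller can journal that write once,
as era 0. A sketch of the intended use, mirroring src/client.rs in this patch:

    if engine.spec().ensure_db_good(&mut state_db) {
        // genesis accounts were just inserted: journal them under era 0,
        // with no ancient era to prune yet
        state_db.commit(0, &engine.spec().genesis_header().hash(), None)
            .expect("Error committing genesis state to state DB");
    }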
diff --git a/src/state.rs b/src/state.rs index a186d6cd6..e325b8d34 100644 --- a/src/state.rs +++ b/src/state.rs @@ -10,7 +10,7 @@ pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. #[derive(Clone)] pub struct State { - db: OverlayDB, + db: JournalDB, root: H256, cache: RefCell>>, @@ -19,7 +19,7 @@ pub struct State { impl State { /// Creates new state with empty state root - pub fn new(mut db: OverlayDB, account_start_nonce: U256) -> State { + pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State { let mut root = H256::new(); { // init trie and reset root too null @@ -35,7 +35,7 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: OverlayDB, root: H256, account_start_nonce: U256) -> State { + pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State { { // trie should panic! if root does not exist let _ = SecTrieDB::new(&db, &root); @@ -51,11 +51,11 @@ impl State { /// Create temporary state object pub fn new_temp() -> State { - Self::new(OverlayDB::new_temp(), U256::from(0u8)) + Self::new(JournalDB::new_temp(), U256::from(0u8)) } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, OverlayDB) { + pub fn drop(self) -> (H256, JournalDB) { (self.root, self.db) } @@ -65,7 +65,7 @@ impl State { } /// Expose the underlying database; good to use for calling `state.db().commit()`. - pub fn db(&mut self) -> &mut OverlayDB { + pub fn db(&mut self) -> &mut JournalDB { &mut self.db } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 011d46663..8dd49cda1 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -46,7 +46,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, &H256)>) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -95,7 +95,7 @@ impl JournalDB { &last })) { let rlp = Rlp::new(&rlp_data); - let to_remove: Vec = rlp.val_at(if *canon_id == rlp.val_at(0) {2} else {1}); + let to_remove: Vec = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); for i in to_remove.iter() { self.forward.remove(i); } diff --git a/util/src/lib.rs b/util/src/lib.rs index 34961f1de..204266c54 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -89,6 +89,7 @@ pub use rlp::*; pub use hashdb::*; pub use memorydb::*; pub use overlaydb::*; +pub use journaldb::*; pub use math::*; pub use chainfilter::*; pub use crypto::*; diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 6330f1e25..3c2286657 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -131,10 +131,15 @@ impl OverlayDB { /// Get the refs and value of the given key. fn put_payload(&self, key: &H256, payload: (Bytes, u32)) { - let mut s = RlpStream::new_list(2); - s.append(&payload.1); - s.append(&payload.0); - self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?"); + if payload.1 > 0 { + let mut s = RlpStream::new_list(2); + s.append(&payload.1); + s.append(&payload.0); + self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?"); + } else { + self.backing.delete(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?"); + } + } } From 7b0e4af078eac3b63147fefaf90674e780373d5f Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 14:44:06 +0100 Subject: [PATCH 04/28] Compact state DB finished. Closes #172 --- src/client.rs | 6 +++--- src/sync/chain.rs | 2 +- util/src/journaldb.rs | 1 + util/src/overlaydb.rs | 8 ++++++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/client.rs b/src/client.rs index fe3eee242..cb1f4672d 100644 --- a/src/client.rs +++ b/src/client.rs @@ -117,7 +117,7 @@ impl Client { let mut opts = Options::new(); opts.create_if_missing(true); opts.set_max_open_files(256); - opts.set_use_fsync(false); + /*opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); opts.set_block_cache_size_mb(1024); @@ -132,7 +132,7 @@ impl Client { opts.set_max_background_compactions(4); opts.set_max_background_flushes(4); opts.set_filter_deletes(false); - opts.set_disable_auto_compactions(true); + opts.set_disable_auto_compactions(false);*/ let mut state_path = path.to_path_buf(); state_path.push("state"); @@ -207,7 +207,7 @@ impl Client { return; } } - info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + debug!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } diff --git a/src/sync/chain.rs b/src/sync/chain.rs index 43f5968f4..40dbc6c9c 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -471,7 +471,7 @@ impl ChainSync { pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Disconnecting {}", peer); if self.peers.contains_key(&peer) { - info!(target: "sync", "Disconneced {}:{}", peer, io.peer_info(peer)); + info!(target: "sync", "Disconnected {}:{}", peer, io.peer_info(peer)); self.clear_peer_download(peer); self.peers.remove(&peer); self.continue_sync(io); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 8dd49cda1..803c0a8c3 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -100,6 +100,7 @@ impl JournalDB { self.forward.remove(i); } try!(self.backing.delete(&last)); + info!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); index += 1; } } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 3c2286657..1d369af96 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -71,6 +71,7 @@ impl OverlayDB { /// ``` pub fn commit(&mut self) -> Result { let mut ret = 0u32; + let mut deletes = 0usize; for i in self.overlay.drain().into_iter() { let (key, (value, rc)) = i; if rc != 0 { @@ -81,7 +82,7 @@ impl OverlayDB { if total_rc < 0 { return Err(From::from(BaseDataError::NegativelyReferencedHash)); } - self.put_payload(&key, (back_value, total_rc as u32)); + deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { @@ -93,6 +94,7 @@ impl OverlayDB { ret += 1; } } + info!("OverlayDB::commit() deleted {} nodes", deletes); Ok(ret) } @@ -130,14 +132,16 @@ impl OverlayDB { } /// Get the refs and value of the given key. - fn put_payload(&self, key: &H256, payload: (Bytes, u32)) { + fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool { if payload.1 > 0 { let mut s = RlpStream::new_list(2); s.append(&payload.1); s.append(&payload.0); self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?"); + false } else { self.backing.delete(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?"); + true } } From 0222b3e08154dd385cf9ca01dd8e8017fd6ec150 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 14:51:49 +0100 Subject: [PATCH 05/28] Reduce log verbosity. --- util/src/journaldb.rs | 2 +- util/src/overlaydb.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 803c0a8c3..db45d51c7 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -100,7 +100,7 @@ impl JournalDB { self.forward.remove(i); } try!(self.backing.delete(&last)); - info!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); + trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); index += 1; } } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 1d369af96..07c992693 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -94,7 +94,7 @@ impl OverlayDB { ret += 1; } } - info!("OverlayDB::commit() deleted {} nodes", deletes); + trace!("OverlayDB::commit() deleted {} nodes", deletes); Ok(ret) } From 1eafdc70087202f5fb9fe6a8ccc10b6641098dbd Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 15:47:50 +0100 Subject: [PATCH 06/28] PR fixes. --- src/client.rs | 1 - util/src/journaldb.rs | 6 +++--- util/src/rlp/rlpstream.rs | 8 ++++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index cb1f4672d..dc8bf3608 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,6 +1,5 @@ use util::*; use rocksdb::{Options, DB}; -use rocksdb::DBCompactionStyle::DBUniversalCompaction; use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index db45d51c7..a4032c154 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -68,7 +68,7 @@ impl JournalDB { let mut r = RlpStream::new_list(2); r.append(&now); r.append(&index); - last = r.out(); + last = r.drain(); &last })).is_some() { index += 1; @@ -78,7 +78,7 @@ impl JournalDB { r.append(id); r.append(&self.inserts); r.append(&self.removes); - try!(self.backing.put(&last, &r.out())); + try!(self.backing.put(&last, r.as_raw())); self.inserts.clear(); self.removes.clear(); } @@ -91,7 +91,7 @@ impl JournalDB { let mut r = RlpStream::new_list(2); r.append(&end_era); r.append(&index); - last = r.out(); + last = r.drain(); &last })) { let rlp = Rlp::new(&rlp_data); diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index b8954ae6f..a30978f24 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -142,6 +142,14 @@ impl RlpStream { self.note_appended(1); } } + + /// Drain the object and return the underlying ElasticArray. + pub fn drain(self) -> ElasticArray1024 { + match self.is_finished() { + true => self.encoder.bytes, + false => panic!() + } + } } struct BasicEncoder { From bd21c6c327cd69c4df2f4bf104a23fdc020e32b1 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 15:48:38 +0100 Subject: [PATCH 07/28] note_used and cache management datatypes. 
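The cache manager keeps a short VecDeque of HashSet "generations" plus an
in_use set of everything currently noted. note_used() records a touched item
in the newest generation (the front of the deque) and pulls it out of any
older generation; collection (added in the next commit) pops the oldest
generation (the back) and evicts exactly the items that were never re-noted
since it was created. A condensed, self-contained sketch of that bookkeeping,
using a plain u64 in place of the CacheID enum introduced below:

    use std::collections::{HashSet, VecDeque};

    struct CacheManager {
        cache_usage: VecDeque<HashSet<u64>>, // newest generation at the front
        in_use: HashSet<u64>,
    }

    impl CacheManager {
        fn note_used(&mut self, id: u64) {
            if !self.cache_usage[0].contains(&id) {
                self.cache_usage[0].insert(id);
                if self.in_use.contains(&id) {
                    // already cached: drop it from whichever older generation holds it
                    if let Some(gen) = self.cache_usage.iter_mut().skip(1).find(|g| g.contains(&id)) {
                        gen.remove(&id);
                    }
                } else {
                    self.in_use.insert(id);
                }
            }
        }
    }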
--- src/blockchain.rs | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/src/blockchain.rs b/src/blockchain.rs index f19b958c5..b55b71fdb 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -96,6 +96,17 @@ pub trait BlockProvider { } } +#[derive(Debug, Hash, Eq, PartialEq, Clone)] +struct CacheID { + id: H256, + extra: usize +} + +struct CacheManager { + cache_usage: VecDeque>, + in_use: HashSet, +} + /// Structure providing fast access to blockchain data. /// /// **Does not do input data verification.** @@ -113,7 +124,9 @@ pub struct BlockChain { blocks_blooms: RwLock>, extras_db: DB, - blocks_db: DB + blocks_db: DB, + + cache_man: RwLock, } impl BlockProvider for BlockChain { @@ -158,6 +171,10 @@ impl BlockProvider for BlockChain { } } +const COLLECTION_QUEUE_SIZE: usize = 2; +const MIN_CACHE_SIZE: usize = 1; +const MAX_CACHE_SIZE: usize = 1024 * 1024 * 1; + impl BlockChain { /// Create new instance of blockchain from given Genesis /// @@ -206,7 +223,8 @@ impl BlockChain { block_logs: RwLock::new(HashMap::new()), blocks_blooms: RwLock::new(HashMap::new()), extras_db: extras_db, - blocks_db: blocks_db + blocks_db: blocks_db, + cache_man: RwLock::new(CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}), }; // load best block @@ -537,6 +555,23 @@ impl BlockChain { self.block_logs.write().unwrap().squeeze(size.block_logs); self.blocks_blooms.write().unwrap().squeeze(size.blocks_blooms); } + + fn note_used(&self, id: CacheID) { + let mut cache_man = self.cache_man.write().unwrap(); + cache_man.cache_usage[0].insert(id.clone()); + // TODO: check more than just the first? + if cache_man.cache_usage[1].contains(&id) { + cache_man.cache_usage[1].remove(&id); + } + else { + cache_man.in_use.insert(id); + } + } + + /// Ticks our cache system and throws out any old data. + pub fn tick(&self) { + + } } #[cfg(test)] From 76cded453bfeb629dc6e935f699ed2293ff045d2 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 19:23:28 +0100 Subject: [PATCH 08/28] Cache manager. Closes #135 --- src/bin/client.rs | 4 ++- src/blockchain.rs | 71 +++++++++++++++++++++++++++++++++++++++-------- src/client.rs | 18 +++++++++++- src/extras.rs | 4 ++- 4 files changed, 82 insertions(+), 15 deletions(-) diff --git a/src/bin/client.rs b/src/bin/client.rs index a862737be..19ae5bfd8 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -53,7 +53,9 @@ impl IoHandler for ClientIoHandler { fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) { if self.timer == timer { - println!("Chain info: {:?}", self.client.read().unwrap().deref().chain_info()); + self.client.tick(); + println!("Chain info: {}", self.client.read().unwrap().deref().chain_info()); + println!("Cache info: {:?}", self.client.read().unwrap().deref().cache_info()); } } } diff --git a/src/blockchain.rs b/src/blockchain.rs index b55b71fdb..d0b97a0ff 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -30,6 +30,11 @@ pub struct CacheSize { pub blocks_blooms: usize } +impl CacheSize { + /// Total amount used by the cache. 
+ fn total(&self) -> usize { self.blocks + self.block_details + self.transaction_addresses + self.block_logs + self.blocks_blooms } +} + /// Information about best block gathered together struct BestBlock { pub hash: H256, @@ -97,9 +102,9 @@ pub trait BlockProvider { } #[derive(Debug, Hash, Eq, PartialEq, Clone)] -struct CacheID { - id: H256, - extra: usize +enum CacheID { + Block(H256), + Extras(ExtrasIndex, H256), } struct CacheManager { @@ -149,6 +154,8 @@ impl BlockProvider for BlockChain { let opt = self.blocks_db.get(hash) .expect("Low level database error. Some issue with disk?"); + self.note_used(CacheID::Block(hash.clone())); + match opt { Some(b) => { let bytes: Bytes = b.to_vec(); @@ -214,6 +221,9 @@ impl BlockChain { blocks_path.push("blocks"); let blocks_db = DB::open_default(blocks_path.to_str().unwrap()).unwrap(); + let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}; + (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); + let bc = BlockChain { best_block: RwLock::new(BestBlock::new()), blocks: RwLock::new(HashMap::new()), @@ -224,7 +234,7 @@ impl BlockChain { blocks_blooms: RwLock::new(HashMap::new()), extras_db: extras_db, blocks_db: blocks_db, - cache_man: RwLock::new(CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}), + cache_man: RwLock::new(cache_man), }; // load best block @@ -515,6 +525,10 @@ impl BlockChain { } } + if let Some(h) = hash.as_h256() { + self.note_used(CacheID::Extras(T::extras_index(), h.clone())); + } + self.extras_db.get_extras(hash).map(| t: T | { let mut write = cache.write().unwrap(); write.insert(hash.clone(), t.clone()); @@ -556,21 +570,54 @@ impl BlockChain { self.blocks_blooms.write().unwrap().squeeze(size.blocks_blooms); } + /// Let the cache system know that a cacheable item has been used. fn note_used(&self, id: CacheID) { let mut cache_man = self.cache_man.write().unwrap(); - cache_man.cache_usage[0].insert(id.clone()); - // TODO: check more than just the first? - if cache_man.cache_usage[1].contains(&id) { - cache_man.cache_usage[1].remove(&id); - } - else { - cache_man.in_use.insert(id); + if !cache_man.cache_usage[0].contains(&id) { + cache_man.cache_usage[0].insert(id.clone()); + if cache_man.in_use.contains(&id) { + if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e|e.contains(&id)) { + c.remove(&id); + } + } else { + cache_man.in_use.insert(id); + } } } /// Ticks our cache system and throws out any old data. - pub fn tick(&self) { + pub fn collect_garbage(&self, force: bool) { + // TODO: check time. 
+ let timeout = true; + let t = self.cache_size().total(); + if t < MIN_CACHE_SIZE || (!timeout && (!force || t < MAX_CACHE_SIZE)) { return; } + + let mut cache_man = self.cache_man.write().unwrap(); + let mut blocks = self.blocks.write().unwrap(); + let mut block_details = self.block_details.write().unwrap(); + let mut block_hashes = self.block_hashes.write().unwrap(); + let mut transaction_addresses = self.transaction_addresses.write().unwrap(); + let mut block_logs = self.block_logs.write().unwrap(); + let mut blocks_blooms = self.blocks_blooms.write().unwrap(); + + for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { + cache_man.in_use.remove(&id); + match id { + CacheID::Block(h) => { blocks.remove(&h); }, + CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); }, + CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); }, + CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); }, + CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); }, + _ => panic!(), + } + } + cache_man.cache_usage.push_front(HashSet::new()); + + // TODO: handle block_hashes properly. + block_hashes.clear(); + + // TODO: m_lastCollection = chrono::system_clock::now(); } } diff --git a/src/client.rs b/src/client.rs index dc8bf3608..0d9e94606 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,6 +1,6 @@ use util::*; use rocksdb::{Options, DB}; -use blockchain::{BlockChain, BlockProvider}; +use blockchain::{BlockChain, BlockProvider, CacheSize}; use views::BlockView; use error::*; use header::BlockNumber; @@ -40,6 +40,12 @@ pub struct BlockChainInfo { pub best_block_number: BlockNumber } +impl fmt::Display for BlockChainInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) + } +} + /// Block queue status #[derive(Debug)] pub struct BlockQueueStatus { @@ -208,6 +214,16 @@ impl Client { } debug!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } + + /// Get info on the cache. + pub fn cache_info(&self) -> CacheSize { + self.chain.read().unwrap().cache_size() + } + + /// Tick the client. + pub fn tick(&self) { + self.chain.read().unwrap().collect_garbage(false); + } } impl BlockChainClient for Client { diff --git a/src/extras.rs b/src/extras.rs index ed0032698..8052af791 100644 --- a/src/extras.rs +++ b/src/extras.rs @@ -3,7 +3,7 @@ use header::BlockNumber; use rocksdb::{DB, Writable}; /// Represents index of extra data in database -#[derive(Copy, Clone)] +#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] pub enum ExtrasIndex { BlockDetails = 0, BlockHash = 1, @@ -59,6 +59,7 @@ impl ExtrasReadable for DB { /// Implementations should convert arbitrary type to database key slice pub trait ExtrasSliceConvertable { fn to_extras_slice(&self, i: ExtrasIndex) -> H264; + fn as_h256(&self) -> Option<&H256> { None } } impl ExtrasSliceConvertable for H256 { @@ -67,6 +68,7 @@ impl ExtrasSliceConvertable for H256 { slice[32] = i as u8; slice } + fn as_h256(&self) -> Option<&H256> { Some(self) } } impl ExtrasSliceConvertable for U256 { From a2860eb1151b986f6f93f4c448edadab67dc729a Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 19:34:30 +0100 Subject: [PATCH 09/28] Fix client builds. 
--- src/bin/client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/bin/client.rs b/src/bin/client.rs index 19ae5bfd8..6f0f4f475 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -53,9 +53,10 @@ impl IoHandler for ClientIoHandler { fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) { if self.timer == timer { - self.client.tick(); - println!("Chain info: {}", self.client.read().unwrap().deref().chain_info()); - println!("Cache info: {:?}", self.client.read().unwrap().deref().cache_info()); + let client = self.client.read().unwrap(); + client.tick(); + println!("Chain info: {}", client.chain_info()); + println!("Cache info: {:?}", client.cache_info()); } } } From 40d9cbdeafb5dd4a8599ed6e30e5d9f402949a0b Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 23:23:32 +0100 Subject: [PATCH 10/28] Delta-based logging. Closes #181 --- src/bin/client.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- src/client.rs | 26 +++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 4 deletions(-) diff --git a/src/bin/client.rs b/src/bin/client.rs index 6f0f4f475..3335d8a72 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -12,6 +12,7 @@ use util::*; use ethcore::client::*; use ethcore::service::ClientService; use ethcore::ethereum; +use ethcore::blockchain::CacheSize; use ethcore::sync::*; fn setup_log() { @@ -29,7 +30,7 @@ fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); - let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0 }); + let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0, info: Default::default() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); loop { let mut cmd = String::new(); @@ -40,10 +41,47 @@ fn main() { } } +#[derive(Default, Debug)] +struct Informant { + chain_info: Option, + cache_info: Option, + report: Option, +} + +impl Informant { + pub fn tick(&mut self, client: &Client) { + // 5 seconds betwen calls. TODO: calculate this properly. 
+ let dur = 5usize; + + let chain_info = client.chain_info(); + let cache_info = client.cache_info(); + let report = client.report(); + + if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (&self.chain_info, &self.cache_info, &self.report) { + println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //···{}···// {} ({}) bl {} ({}) ex ]", + chain_info.best_block_number, + chain_info.best_block_hash, + (report.blocks_imported - last_report.blocks_imported) / dur, + (report.transactions_applied - last_report.transactions_applied) / dur, + (report.gas_processed - last_report.gas_processed) / From::from(dur), + 0, // TODO: peers + cache_info.blocks, + cache_info.blocks as isize - last_cache_info.blocks as isize, + cache_info.block_details, + cache_info.block_details as isize - last_cache_info.block_details as isize + ); + } + + self.chain_info = Some(chain_info); + self.cache_info = Some(cache_info); + self.report = Some(report); + } +} struct ClientIoHandler { client: Arc>, timer: TimerToken, + info: Informant, } impl IoHandler for ClientIoHandler { @@ -55,8 +93,7 @@ impl IoHandler for ClientIoHandler { if self.timer == timer { let client = self.client.read().unwrap(); client.tick(); - println!("Chain info: {}", client.chain_info()); - println!("Cache info: {:?}", client.cache_info()); + self.info.tick(client.deref()); } } } diff --git a/src/client.rs b/src/client.rs index f8c8e8bb8..cf57e6a07 100644 --- a/src/client.rs +++ b/src/client.rs @@ -105,12 +105,28 @@ pub trait BlockChainClient : Sync + Send { fn chain_info(&self) -> BlockChainInfo; } +#[derive(Default, Clone, Debug, Eq, PartialEq)] +pub struct ClientReport { + pub blocks_imported: usize, + pub transactions_applied: usize, + pub gas_processed: U256, +} + +impl ClientReport { + pub fn accrue_block(&mut self, block: &PreVerifiedBlock) { + self.blocks_imported += 1; + self.transactions_applied += block.transactions.len(); + self.gas_processed += block.header.gas_used; + } +} + /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. pub struct Client { chain: Arc>, engine: Arc>, state_db: JournalDB, queue: BlockQueue, + report: ClientReport, } const HISTORY: u64 = 1000; @@ -156,6 +172,7 @@ impl Client { engine: engine.clone(), state_db: state_db, queue: BlockQueue::new(engine, message_channel), + report: Default::default(), }) } @@ -228,7 +245,9 @@ impl Client { return; } } - info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + self.report.accrue_block(&block); + + trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } @@ -237,6 +256,11 @@ impl Client { self.chain.read().unwrap().cache_size() } + /// Get the report. + pub fn report(&self) -> ClientReport { + self.report.clone() + } + /// Tick the client. pub fn tick(&self) { self.chain.read().unwrap().collect_garbage(false); From f946088e4393cdecddb3675241d500fbd5b8c705 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 23:46:33 +0100 Subject: [PATCH 11/28] Implement signs having low-s. 
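For secp256k1, a signature (r, s, v) has a second equally valid form
(r, n - s, v ^ 1), where n is the curve order; without a canonical choice a
third party can maul a valid signature into a different valid one. Forcing
the "low-s" form (s <= n/2) removes that malleability. The normalisation
added to crypto.rs below is, in sketch form:

    // n = order of the secp256k1 group
    // (r, s, v) = freshly produced signature
    if !is_low_s(&s) {
        s = n - s;   // take the complementary s in the lower half-range
        v = v ^ 1;   // and flip the recovery id to match
    }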
Closes #159 --- util/src/crypto.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/util/src/crypto.rs b/util/src/crypto.rs index 7e1c5fce1..e9e440024 100644 --- a/util/src/crypto.rs +++ b/util/src/crypto.rs @@ -151,6 +151,12 @@ pub mod ec { let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() }; signature.clone_from_slice(&data); signature[64] = rec_id.to_i32() as u8; + + let (_, s, v) = signature.to_rsv(); + let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap(); + if !is_low_s(&s) { + signature = super::Signature::from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1); + } Ok(signature) } /// Verify signature. @@ -174,7 +180,7 @@ pub mod ec { /// Check if this is a "low" signature. pub fn is_low(sig: &Signature) -> bool { - H256::from_slice(&sig[32..64]) <= h256_from_hex("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0") + H256::from_slice(&sig[32..64]) <= h256_from_hex("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0") } /// Check if this is a "low" signature. From a9a4da4b3e33beaef36425defe833d96bde2faf5 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 23:50:40 +0100 Subject: [PATCH 12/28] Fix test build. --- util/src/journaldb.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index a4032c154..ada9c0d2b 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -140,9 +140,9 @@ mod tests { assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); assert!(jdb.exists(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, &b"0".sha3()))).unwrap(); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.exists(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, &b"1".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.exists(&h)); } @@ -160,25 +160,25 @@ mod tests { jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, &b"0".sha3()))).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, &b"1".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, &b"2".sha3()))).unwrap(); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); - jdb.commit(4, &b"4".sha3(), Some((3, &b"3".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); @@ -197,16 +197,16 @@ mod tests { jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, &b"0".sha3()))).unwrap(); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, &b"0".sha3()))).unwrap(); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); - jdb.commit(2, &b"2b".sha3(), Some((1, &b"1b".sha3()))).unwrap(); + jdb.commit(2, &b"2b".sha3(), Some((1, 
b"1b".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); From ab0aabf02c1cf66c0bc74b448534ab37c802273c Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 18 Jan 2016 23:54:16 +0100 Subject: [PATCH 13/28] PR suggestion. --- util/src/overlaydb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 07c992693..e8492091f 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -137,7 +137,7 @@ impl OverlayDB { let mut s = RlpStream::new_list(2); s.append(&payload.1); s.append(&payload.0); - self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?"); + self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); false } else { self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); From 9c38e7e68cf5ed6896efb8d44659f6ea7f03c53b Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 19 Jan 2016 09:59:16 +0100 Subject: [PATCH 14/28] moved src/bin/client.rs -> src/bin/client/main.rs --- Cargo.toml | 4 ++++ src/bin/{client.rs => client/main.rs} | 0 2 files changed, 4 insertions(+) rename src/bin/{client.rs => client/main.rs} (100%) diff --git a/Cargo.toml b/Cargo.toml index 0583aa78f..04c4bf956 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,3 +24,7 @@ num_cpus = "0.2" [features] jit = ["evmjit"] evm_debug = [] + +[[bin]] +name = "client" +path = "src/bin/client/main.rs" diff --git a/src/bin/client.rs b/src/bin/client/main.rs similarity index 100% rename from src/bin/client.rs rename to src/bin/client/main.rs From ec3bc85f222028bad6484b459ca77365a52461da Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 19 Jan 2016 11:53:38 +0100 Subject: [PATCH 15/28] Fix tests. --- src/account.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/account.rs b/src/account.rs index c64e80ed8..c6c4136df 100644 --- a/src/account.rs +++ b/src/account.rs @@ -236,7 +236,7 @@ mod tests { #[test] fn storage_at() { - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); @@ -254,7 +254,7 @@ mod tests { #[test] fn note_code() { - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); @@ -273,7 +273,7 @@ mod tests { #[test] fn commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); a.set_storage(x!(0), x!(0x1234)); assert_eq!(a.storage_root(), None); a.commit_storage(&mut db); @@ -283,7 +283,7 @@ mod tests { #[test] fn commit_remove_commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); a.set_storage(x!(0), x!(0x1234)); a.commit_storage(&mut db); a.set_storage(x!(1), x!(0x1234)); @@ -296,7 +296,7 @@ mod tests { #[test] fn commit_code() { let mut a = Account::new_contract(U256::from(69u8)); - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); a.init_code(vec![0x55, 0x44, 0xffu8]); assert_eq!(a.code_hash(), SHA3_EMPTY); a.commit_code(&mut db); From af3d76fbf14df4c5ad6f2b5eeb1ac0aa914e3ba9 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 19 Jan 2016 11:56:41 +0100 Subject: [PATCH 16/28] Another fix. 
--- src/ethereum/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs index 25a804db3..d9be7be85 100644 --- a/src/ethereum/mod.rs +++ b/src/ethereum/mod.rs @@ -40,7 +40,7 @@ mod tests { fn ensure_db_good() { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = MemoryDB::new_temp(); + let mut db = MemoryDB::new(); engine.spec().ensure_db_good(&mut db); let s = State::from_existing(db.clone(), genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); From e82bcedf2e69c74af381b03ededa1db709a04b14 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 19 Jan 2016 11:57:10 +0100 Subject: [PATCH 17/28] Fix again. --- src/ethereum/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs index d9be7be85..e97ac79a3 100644 --- a/src/ethereum/mod.rs +++ b/src/ethereum/mod.rs @@ -40,7 +40,7 @@ mod tests { fn ensure_db_good() { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); - let mut db = MemoryDB::new(); + let mut db = JournalDB::new_temp(); engine.spec().ensure_db_good(&mut db); let s = State::from_existing(db.clone(), genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); From 9fa4a693ce4cd2b1b176e916b320f1eabd852992 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 19 Jan 2016 12:04:27 +0100 Subject: [PATCH 18/28] fixed build issues with stabilized feature WrappingOps --- src/evm/interpreter.rs | 1 - src/lib.rs | 1 - util/src/lib.rs | 2 +- util/src/uint.rs | 217 ++++++++++++++++++++++------------------- 4 files changed, 117 insertions(+), 104 deletions(-) diff --git a/src/evm/interpreter.rs b/src/evm/interpreter.rs index bd3a153b9..88823cbea 100644 --- a/src/evm/interpreter.rs +++ b/src/evm/interpreter.rs @@ -4,7 +4,6 @@ use common::*; use evm; use super::instructions as instructions; use super::instructions::Instruction; -use std::num::wrapping::OverflowingOps; use std::marker::Copy; use evm::{MessageCallResult, ContractCreateResult}; diff --git a/src/lib.rs b/src/lib.rs index 8180736a5..20c59eafa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,5 @@ #![feature(cell_extras)] #![feature(augmented_assignments)] -#![feature(wrapping)] //#![feature(plugin)] //#![plugin(interpolate_idents)] //! Ethcore's ethereum implementation diff --git a/util/src/lib.rs b/util/src/lib.rs index 4bc47e61c..a5dd7e8a2 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,7 +1,7 @@ #![feature(op_assign_traits)] #![feature(augmented_assignments)] #![feature(associated_consts)] -#![feature(wrapping)] +//#![feature(wrapping)] //! Ethcore-util library //! //! ### Rust version: diff --git a/util/src/uint.rs b/util/src/uint.rs index ec70cddb2..8afbe87f4 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -23,7 +23,7 @@ use standard::*; use from_json::*; -use std::num::wrapping::OverflowingOps; +//use std::num::wrapping::OverflowingOps; macro_rules! 
impl_map_from { ($thing:ident, $from:ty, $to:ty) => { @@ -97,6 +97,23 @@ pub trait Uint: Sized + Default + FromStr + From + FromJson + fmt::Debug + fn pow(self, other: Self) -> Self; /// Return wrapped eponentation `self**other` and flag if there was an overflow fn overflowing_pow(self, other: Self) -> (Self, bool); + + + fn overflowing_add(self, other: Self) -> (Self, bool); + + fn overflowing_sub(self, other: Self) -> (Self, bool); + + fn overflowing_mul(self, other: Self) -> (Self, bool); + + fn overflowing_div(self, other: Self) -> (Self, bool); + + fn overflowing_rem(self, other: Self) -> (Self, bool); + + fn overflowing_neg(self) -> (Self, bool); + + fn overflowing_shl(self, shift: u32) -> (Self, bool); + + fn overflowing_shr(self, shift: u32) -> (Self, bool); } macro_rules! construct_uint { @@ -259,6 +276,104 @@ macro_rules! construct_uint { let res = overflowing!(x.overflowing_mul(y), overflow); (res, overflow) } + + fn overflowing_add(self, other: $name) -> ($name, bool) { + let $name(ref me) = self; + let $name(ref you) = other; + let mut ret = [0u64; $n_words]; + let mut carry = [0u64; $n_words]; + let mut b_carry = false; + let mut overflow = false; + + for i in 0..$n_words { + ret[i] = me[i].wrapping_add(you[i]); + + if ret[i] < me[i] { + if i < $n_words - 1 { + carry[i + 1] = 1; + b_carry = true; + } else { + overflow = true; + } + } + } + if b_carry { + let ret = overflowing!($name(ret).overflowing_add($name(carry)), overflow); + (ret, overflow) + } else { + ($name(ret), overflow) + } + } + + fn overflowing_sub(self, other: $name) -> ($name, bool) { + let res = overflowing!((!other).overflowing_add(From::from(1u64))); + let res = overflowing!(self.overflowing_add(res)); + (res, self < other) + } + + fn overflowing_mul(self, other: $name) -> ($name, bool) { + let mut res = $name::from(0u64); + let mut overflow = false; + // TODO: be more efficient about this + for i in 0..(2 * $n_words) { + let v = overflowing!(self.overflowing_mul_u32((other >> (32 * i)).low_u32()), overflow); + let res2 = overflowing!(v.overflowing_shl(32 * i as u32), overflow); + res = overflowing!(res.overflowing_add(res2), overflow); + } + (res, overflow) + } + + fn overflowing_div(self, other: $name) -> ($name, bool) { + (self / other, false) + } + + fn overflowing_rem(self, other: $name) -> ($name, bool) { + (self % other, false) + } + + fn overflowing_neg(self) -> ($name, bool) { + (!self, true) + } + + fn overflowing_shl(self, shift32: u32) -> ($name, bool) { + let $name(ref original) = self; + let mut ret = [0u64; $n_words]; + let shift = shift32 as usize; + let word_shift = shift / 64; + let bit_shift = shift % 64; + for i in 0..$n_words { + // Shift + if i + word_shift < $n_words { + ret[i + word_shift] += original[i] << bit_shift; + } + // Carry + if bit_shift > 0 && i + word_shift + 1 < $n_words { + ret[i + word_shift + 1] += original[i] >> (64 - bit_shift); + } + } + // Detecting overflow + let last = $n_words - word_shift - if bit_shift > 0 { 1 } else { 0 }; + let overflow = if bit_shift > 0 { + (original[last] >> (64 - bit_shift)) > 0 + } else if word_shift > 0 { + original[last] > 0 + } else { + false + }; + + for i in last+1..$n_words-1 { + if original[i] > 0 { + return ($name(ret), true); + } + } + ($name(ret), overflow) + } + + fn overflowing_shr(self, _shift32: u32) -> ($name, bool) { + // TODO [todr] not used for now + unimplemented!(); + } + } impl $name { @@ -390,105 +505,6 @@ macro_rules! 
construct_uint { } } - impl OverflowingOps for $name { - fn overflowing_add(self, other: $name) -> ($name, bool) { - let $name(ref me) = self; - let $name(ref you) = other; - let mut ret = [0u64; $n_words]; - let mut carry = [0u64; $n_words]; - let mut b_carry = false; - let mut overflow = false; - - for i in 0..$n_words { - ret[i] = me[i].wrapping_add(you[i]); - - if ret[i] < me[i] { - if i < $n_words - 1 { - carry[i + 1] = 1; - b_carry = true; - } else { - overflow = true; - } - } - } - if b_carry { - let ret = overflowing!($name(ret).overflowing_add($name(carry)), overflow); - (ret, overflow) - } else { - ($name(ret), overflow) - } - } - - fn overflowing_sub(self, other: $name) -> ($name, bool) { - let res = overflowing!((!other).overflowing_add(From::from(1u64))); - let res = overflowing!(self.overflowing_add(res)); - (res, self < other) - } - - fn overflowing_mul(self, other: $name) -> ($name, bool) { - let mut res = $name::from(0u64); - let mut overflow = false; - // TODO: be more efficient about this - for i in 0..(2 * $n_words) { - let v = overflowing!(self.overflowing_mul_u32((other >> (32 * i)).low_u32()), overflow); - let res2 = overflowing!(v.overflowing_shl(32 * i as u32), overflow); - res = overflowing!(res.overflowing_add(res2), overflow); - } - (res, overflow) - } - - fn overflowing_div(self, other: $name) -> ($name, bool) { - (self / other, false) - } - - fn overflowing_rem(self, other: $name) -> ($name, bool) { - (self % other, false) - } - - fn overflowing_neg(self) -> ($name, bool) { - (!self, true) - } - - fn overflowing_shl(self, shift32: u32) -> ($name, bool) { - let $name(ref original) = self; - let mut ret = [0u64; $n_words]; - let shift = shift32 as usize; - let word_shift = shift / 64; - let bit_shift = shift % 64; - for i in 0..$n_words { - // Shift - if i + word_shift < $n_words { - ret[i + word_shift] += original[i] << bit_shift; - } - // Carry - if bit_shift > 0 && i + word_shift + 1 < $n_words { - ret[i + word_shift + 1] += original[i] >> (64 - bit_shift); - } - } - // Detecting overflow - let last = $n_words - word_shift - if bit_shift > 0 { 1 } else { 0 }; - let overflow = if bit_shift > 0 { - (original[last] >> (64 - bit_shift)) > 0 - } else if word_shift > 0 { - original[last] > 0 - } else { - false - }; - - for i in last+1..$n_words-1 { - if original[i] > 0 { - return ($name(ret), true); - } - } - ($name(ret), overflow) - } - - fn overflowing_shr(self, _shift32: u32) -> ($name, bool) { - // TODO [todr] not used for now - unimplemented!(); - } - } - impl Add<$name> for $name { type Output = $name; @@ -915,7 +931,6 @@ pub const BAD_U256: U256 = U256([0xffffffffffffffffu64; 4]); mod tests { use uint::{Uint, U128, U256, U512}; use std::str::FromStr; - use std::num::wrapping::OverflowingOps; #[test] pub fn assign_ops() { From 03bdecf23fc6f849180c4862f12ae04e7e868ce3 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 19 Jan 2016 12:05:53 +0100 Subject: [PATCH 19/28] removed commented lines --- util/src/lib.rs | 1 - util/src/uint.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/util/src/lib.rs b/util/src/lib.rs index a5dd7e8a2..84d67764c 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,7 +1,6 @@ #![feature(op_assign_traits)] #![feature(augmented_assignments)] #![feature(associated_consts)] -//#![feature(wrapping)] //! Ethcore-util library //! //! 
### Rust version: diff --git a/util/src/uint.rs b/util/src/uint.rs index 8afbe87f4..60493688c 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -23,7 +23,6 @@ use standard::*; use from_json::*; -//use std::num::wrapping::OverflowingOps; macro_rules! impl_map_from { ($thing:ident, $from:ty, $to:ty) => { From 3a1b37d6f26a8b81caf135513f41a60bd9610372 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 19 Jan 2016 12:20:44 +0100 Subject: [PATCH 20/28] removed overflowing_shr --- util/src/uint.rs | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/util/src/uint.rs b/util/src/uint.rs index 60493688c..c1cb38fd2 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -111,8 +111,6 @@ pub trait Uint: Sized + Default + FromStr + From + FromJson + fmt::Debug + fn overflowing_neg(self) -> (Self, bool); fn overflowing_shl(self, shift: u32) -> (Self, bool); - - fn overflowing_shr(self, shift: u32) -> (Self, bool); } macro_rules! construct_uint { @@ -367,12 +365,6 @@ macro_rules! construct_uint { } ($name(ret), overflow) } - - fn overflowing_shr(self, _shift32: u32) -> ($name, bool) { - // TODO [todr] not used for now - unimplemented!(); - } - } impl $name { @@ -1311,28 +1303,6 @@ mod tests { ); } - #[ignore] - #[test] - pub fn uint256_shr_overflow() { - assert_eq!( - U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - .overflowing_shr(4), - (U256::from_str("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) - ); - } - - #[ignore] - #[test] - pub fn uint256_shr_overflow2() { - assert_eq!( - U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0").unwrap() - .overflowing_shr(4), - (U256::from_str("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), false) - ); - } - - - #[test] pub fn uint256_mul() { assert_eq!( From 7c4868e51a80bede16fe96f59b566f002b51dbb3 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 14:35:56 +0100 Subject: [PATCH 21/28] Generating coverage report --- cov.sh | 21 +++++++++++++++++++++ util/cov.sh | 1 + 2 files changed, 22 insertions(+) create mode 100755 cov.sh create mode 120000 util/cov.sh diff --git a/cov.sh b/cov.sh new file mode 100755 index 000000000..e7366ab82 --- /dev/null +++ b/cov.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# Installing KCOV under ubuntu +# https://users.rust-lang.org/t/tutorial-how-to-collect-test-coverages-for-rust-project/650# +### Install deps +# $ sudo apt-get install libcurl4-openssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev +# +### Compile kcov +# $ wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz && tar xf master.tar.gz +# $ cd kcov-master && mkdir build && cd build +# $ cmake .. && make && sudo make install + +### Running coverage +if ! type kcov > /dev/null; then + echo "Install kcov first (details inside this file). Aborting." 
+ exit 1 +fi + +cargo test --no-run +mkdir -p target/coverage +kcov --verify --skip-solibs target/coverage target/debug/ethcore-* +xdg-open target/coverage/index.html diff --git a/util/cov.sh b/util/cov.sh new file mode 120000 index 000000000..72bb061f8 --- /dev/null +++ b/util/cov.sh @@ -0,0 +1 @@ +../cov.sh \ No newline at end of file From 33d8fdca3f2cc2114621a4bccb1af3ea7c979293 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 14:43:24 +0100 Subject: [PATCH 22/28] Fixing include/exclude patterns --- cov.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cov.sh b/cov.sh index e7366ab82..7d13c8c96 100755 --- a/cov.sh +++ b/cov.sh @@ -15,7 +15,7 @@ if ! type kcov > /dev/null; then exit 1 fi -cargo test --no-run +cargo test --no-run || exit 2 mkdir -p target/coverage -kcov --verify --skip-solibs target/coverage target/debug/ethcore-* +kcov --exclude-pattern ~/.multirust --include-pattern src --verify target/coverage target/debug/ethcore* xdg-open target/coverage/index.html From 0885f44f5062a25fd82449c1fd5a108912d19c3b Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 15:02:03 +0100 Subject: [PATCH 23/28] Fixing error code --- cov.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cov.sh b/cov.sh index 7d13c8c96..9f2a87a47 100755 --- a/cov.sh +++ b/cov.sh @@ -15,7 +15,7 @@ if ! type kcov > /dev/null; then exit 1 fi -cargo test --no-run || exit 2 +cargo test --no-run || exit $? mkdir -p target/coverage kcov --exclude-pattern ~/.multirust --include-pattern src --verify target/coverage target/debug/ethcore* xdg-open target/coverage/index.html From bad2a244df5eab19296c26f5073ef58e5392f0a1 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 17:02:01 +0100 Subject: [PATCH 24/28] Adding documentation request comments. 
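A minimal sketch of the pattern applied throughout this patch (the `Example` item below is hypothetical and not part of the diff): with `#![warn(missing_docs)]` at the crate root, every public item that lacks a doc comment emits a warning, so each undocumented item receives a placeholder `/// TODO [...] Please document me` comment until proper documentation is written.

    #![warn(missing_docs)]
    //! Crate-level docs already exist in both lib.rs files; the lint checks these too.

    /// TODO [author] Please document me
    pub struct Example {
        /// TODO [author] Please document me
        pub field: u64,
    }
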
--- src/account_diff.rs | 10 +++++ src/action_params.rs | 1 + src/basic_types.rs | 3 ++ src/block.rs | 4 ++ src/blockchain.rs | 8 ++++ src/builtin.rs | 1 + src/client.rs | 7 ++++ src/engine.rs | 7 ++++ src/env_info.rs | 1 + src/error.rs | 74 +++++++++++++++++++++++++++++++++-- src/ethereum/denominations.rs | 5 +++ src/ethereum/ethash.rs | 1 + src/ethereum/mod.rs | 2 + src/evm/evm.rs | 8 ++++ src/evm/ext.rs | 1 + src/evm/factory.rs | 3 ++ src/evm/mod.rs | 1 + src/evm/schedule.rs | 31 +++++++++++++++ src/extras.rs | 19 +++++++++ src/header.rs | 38 ++++++++++++++++++ src/lib.rs | 26 ++++++++++++ src/log_entry.rs | 3 ++ src/null_engine.rs | 1 + src/pod_account.rs | 5 +++ src/pod_state.rs | 1 + src/queue.rs | 1 + src/receipt.rs | 5 +++ src/service.rs | 2 + src/spec.rs | 16 ++++++++ src/state.rs | 2 + src/state_diff.rs | 1 + src/substate.rs | 1 + src/sync/mod.rs | 1 + src/transaction.rs | 13 ++++++ util/src/bytes.rs | 16 ++++++++ util/src/chainfilter.rs | 2 + util/src/common.rs | 1 + util/src/crypto.rs | 18 +++++++++ util/src/error.rs | 13 ++++++ util/src/from_json.rs | 2 + util/src/hash.rs | 20 ++++++++++ util/src/io/mod.rs | 7 ++++ util/src/json_aid.rs | 1 + util/src/lib.rs | 13 ++++++ util/src/memorydb.rs | 2 + util/src/misc.rs | 6 +++ util/src/network/mod.rs | 6 +++ util/src/nibbleslice.rs | 4 ++ util/src/rlp/mod.rs | 9 +++++ util/src/rlp/rlperrors.rs | 9 +++++ util/src/rlp/rlpin.rs | 2 + util/src/rlp/rlpstream.rs | 4 ++ util/src/rlp/rlptraits.rs | 25 ++++++++++++ util/src/rlp/untrusted_rlp.rs | 6 +++ util/src/sha3.rs | 1 + util/src/squeeze.rs | 1 + util/src/trie/mod.rs | 7 ++++ util/src/trie/node.rs | 4 ++ util/src/trie/standardmap.rs | 4 ++ util/src/trie/triedb.rs | 1 + util/src/trie/triedbmut.rs | 1 + util/src/uint.rs | 15 +++++++ util/src/vector.rs | 3 ++ 63 files changed, 502 insertions(+), 4 deletions(-) diff --git a/src/account_diff.rs b/src/account_diff.rs index 2bf138669..06315db95 100644 --- a/src/account_diff.rs +++ b/src/account_diff.rs @@ -5,8 +5,11 @@ use pod_account::*; /// Change in existance type. // TODO: include other types of change. 
pub enum Existance { + /// TODO [Gav Wood] Please document me Born, + /// TODO [Gav Wood] Please document me Alive, + /// TODO [Gav Wood] Please document me Died, } @@ -22,14 +25,20 @@ impl fmt::Display for Existance { } #[derive(Debug,Clone,PartialEq,Eq)] +/// TODO [Gav Wood] Please document me pub struct AccountDiff { + /// TODO [Gav Wood] Please document me pub balance: Diff, // Allowed to be Same + /// TODO [Gav Wood] Please document me pub nonce: Diff, // Allowed to be Same + /// TODO [Gav Wood] Please document me pub code: Diff, // Allowed to be Same + /// TODO [Gav Wood] Please document me pub storage: BTreeMap>,// Not allowed to be Same } impl AccountDiff { + /// TODO [Gav Wood] Please document me pub fn existance(&self) -> Existance { match self.balance { Diff::Born(_) => Existance::Born, @@ -38,6 +47,7 @@ impl AccountDiff { } } + /// TODO [Gav Wood] Please document me pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option { match (pre, post) { (None, Some(x)) => Some(AccountDiff { diff --git a/src/action_params.rs b/src/action_params.rs index 372927326..da1ae0ce0 100644 --- a/src/action_params.rs +++ b/src/action_params.rs @@ -30,6 +30,7 @@ pub struct ActionParams { } impl ActionParams { + /// TODO [Gav Wood] Please document me pub fn new() -> ActionParams { ActionParams { code_address: Address::new(), diff --git a/src/basic_types.rs b/src/basic_types.rs index 2466d8813..3d6fc2609 100644 --- a/src/basic_types.rs +++ b/src/basic_types.rs @@ -6,7 +6,10 @@ pub type LogBloom = H2048; /// Constant 2048-bit datum for 0. Often used as a default. pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); +/// TODO [Gav Wood] Please document me pub enum Seal { + /// TODO [Gav Wood] Please document me With, + /// TODO [Gav Wood] Please document me Without, } diff --git a/src/block.rs b/src/block.rs index 1b578e3a3..ac0ef7f89 100644 --- a/src/block.rs +++ b/src/block.rs @@ -24,9 +24,13 @@ pub struct Block { /// A set of references to `Block` fields that are publicly accessible. pub struct BlockRefMut<'a> { + /// TODO [Gav Wood] Please document me pub header: &'a Header, + /// TODO [Gav Wood] Please document me pub state: &'a mut State, + /// TODO [Gav Wood] Please document me pub archive: &'a Vec, + /// TODO [Gav Wood] Please document me pub uncles: &'a Vec
, } diff --git a/src/blockchain.rs b/src/blockchain.rs index d0b97a0ff..27abe9ee3 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -15,18 +15,26 @@ use views::*; /// /// - `index` - an index where best common ancestor would be. pub struct TreeRoute { + /// TODO [debris] Please document me pub blocks: Vec, + /// TODO [debris] Please document me pub ancestor: H256, + /// TODO [debris] Please document me pub index: usize } /// Represents blockchain's in-memory cache size in bytes. #[derive(Debug)] pub struct CacheSize { + /// TODO [debris] Please document me pub blocks: usize, + /// TODO [debris] Please document me pub block_details: usize, + /// TODO [debris] Please document me pub transaction_addresses: usize, + /// TODO [debris] Please document me pub block_logs: usize, + /// TODO [debris] Please document me pub blocks_blooms: usize } diff --git a/src/builtin.rs b/src/builtin.rs index 85319c948..1835eda97 100644 --- a/src/builtin.rs +++ b/src/builtin.rs @@ -63,6 +63,7 @@ impl Builtin { } } +/// TODO [Gav Wood] Please document me pub fn copy_to(src: &[u8], dest: &mut[u8]) { // NICE: optimise for i in 0..min(src.len(), dest.len()) { diff --git a/src/client.rs b/src/client.rs index cf57e6a07..b0801f097 100644 --- a/src/client.rs +++ b/src/client.rs @@ -49,9 +49,11 @@ impl fmt::Display for BlockChainInfo { /// Block queue status #[derive(Debug)] pub struct BlockQueueStatus { + /// TODO [arkpar] Please document me pub full: bool, } +/// TODO [arkpar] Please document me pub type TreeRoute = ::blockchain::TreeRoute; /// Blockchain database client. Owns and manages a blockchain and a block queue. @@ -106,13 +108,18 @@ pub trait BlockChainClient : Sync + Send { } #[derive(Default, Clone, Debug, Eq, PartialEq)] +/// TODO [Gav Wood] Please document me pub struct ClientReport { + /// TODO [Gav Wood] Please document me pub blocks_imported: usize, + /// TODO [Gav Wood] Please document me pub transactions_applied: usize, + /// TODO [Gav Wood] Please document me pub gas_processed: U256, } impl ClientReport { + /// TODO [Gav Wood] Please document me pub fn accrue_block(&mut self, block: &PreVerifiedBlock) { self.blocks_imported += 1; self.transactions_applied += block.transactions.len(); diff --git a/src/engine.rs b/src/engine.rs index 79857a404..d94797290 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -31,11 +31,14 @@ pub trait Engine : Sync + Send { /// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`. fn maximum_extra_data_size(&self) -> usize { decode(&self.spec().engine_params.get("maximumExtraDataSize").unwrap()) } + /// TODO [Gav Wood] Please document me fn maximum_uncle_count(&self) -> usize { 2 } + /// TODO [Gav Wood] Please document me fn account_start_nonce(&self) -> U256 { decode(&self.spec().engine_params.get("accountStartNonce").unwrap()) } /// Block transformation functions, before and after the transactions. fn on_new_block(&self, _block: &mut Block) {} + /// TODO [Gav Wood] Please document me fn on_close_block(&self, _block: &mut Block) {} // TODO: consider including State in the params for verification functions. @@ -55,6 +58,7 @@ pub trait Engine : Sync + Send { // TODO: Add flags for which bits of the transaction to check. // TODO: consider including State in the params. 
fn verify_transaction_basic(&self, _t: &Transaction, _header: &Header) -> Result<(), Error> { Ok(()) } + /// TODO [Gav Wood] Please document me fn verify_transaction(&self, _t: &Transaction, _header: &Header) -> Result<(), Error> { Ok(()) } /// Don't forget to call Super::populateFromParent when subclassing & overriding. @@ -63,8 +67,11 @@ pub trait Engine : Sync + Send { // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic // from Spec into here and removing the Spec::builtins field. + /// TODO [Gav Wood] Please document me fn is_builtin(&self, a: &Address) -> bool { self.spec().builtins.contains_key(a) } + /// TODO [Gav Wood] Please document me fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.spec().builtins.get(a).unwrap().cost(input.len()) } + /// TODO [Gav Wood] Please document me fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.spec().builtins.get(a).unwrap().execute(input, output); } // TODO: sealing stuff - though might want to leave this for later. diff --git a/src/env_info.rs b/src/env_info.rs index c12fa653c..1246234ff 100644 --- a/src/env_info.rs +++ b/src/env_info.rs @@ -25,6 +25,7 @@ pub struct EnvInfo { } impl EnvInfo { + /// TODO [debris] Please document me pub fn new() -> EnvInfo { EnvInfo { number: 0, diff --git a/src/error.rs b/src/error.rs index 088367ae3..bc2bdfe97 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,15 +5,22 @@ use header::BlockNumber; use basic_types::LogBloom; #[derive(Debug, PartialEq, Eq)] +/// TODO [Gav Wood] Please document me pub struct Mismatch { + /// TODO [Gav Wood] Please document me pub expected: T, + /// TODO [Gav Wood] Please document me pub found: T, } #[derive(Debug, PartialEq, Eq)] +/// TODO [Gav Wood] Please document me pub struct OutOfBounds { + /// TODO [Gav Wood] Please document me pub min: Option, + /// TODO [Gav Wood] Please document me pub max: Option, + /// TODO [Gav Wood] Please document me pub found: T, } @@ -22,58 +29,112 @@ pub struct OutOfBounds { pub enum ExecutionError { /// Returned when there gas paid for transaction execution is /// lower than base gas required. - NotEnoughBaseGas { required: U256, got: U256 }, + /// TODO [Gav Wood] Please document me + NotEnoughBaseGas { + /// TODO [Gav Wood] Please document me + required: U256, + /// TODO [Gav Wood] Please document me + got: U256 + }, /// Returned when block (gas_used + gas) > gas_limit. /// /// If gas =< gas_limit, upstream may try to execute the transaction /// in next block. - BlockGasLimitReached { gas_limit: U256, gas_used: U256, gas: U256 }, + BlockGasLimitReached { + /// TODO [Gav Wood] Please document me + gas_limit: U256, + /// TODO [Gav Wood] Please document me + gas_used: U256, + /// TODO [Gav Wood] Please document me + gas: U256 + }, /// Returned when transaction nonce does not match state nonce. - InvalidNonce { expected: U256, got: U256 }, + InvalidNonce { + /// TODO [Gav Wood] Please document me + expected: U256, + /// TODO [Gav Wood] Please document me + got: U256 + }, /// Returned when cost of transaction (value + gas_price * gas) exceeds /// current sender balance. - NotEnoughCash { required: U512, got: U512 }, + NotEnoughCash { + /// TODO [Gav Wood] Please document me + required: U512, + /// TODO [Gav Wood] Please document me + got: U512 + }, /// Returned when internal evm error occurs. 
Internal } #[derive(Debug)] +/// TODO [Gav Wood] Please document me pub enum TransactionError { + /// TODO [Gav Wood] Please document me InvalidGasLimit(OutOfBounds), } #[derive(Debug, PartialEq, Eq)] +/// TODO [arkpar] Please document me pub enum BlockError { + /// TODO [Gav Wood] Please document me TooManyUncles(OutOfBounds), + /// TODO [Gav Wood] Please document me UncleWrongGeneration, + /// TODO [Gav Wood] Please document me ExtraDataOutOfBounds(OutOfBounds), + /// TODO [arkpar] Please document me InvalidSealArity(Mismatch), + /// TODO [arkpar] Please document me TooMuchGasUsed(OutOfBounds), + /// TODO [arkpar] Please document me InvalidUnclesHash(Mismatch), + /// TODO [arkpar] Please document me UncleTooOld(OutOfBounds), + /// TODO [arkpar] Please document me UncleIsBrother(OutOfBounds), + /// TODO [arkpar] Please document me UncleInChain(H256), + /// TODO [arkpar] Please document me UncleParentNotInChain(H256), + /// TODO [arkpar] Please document me InvalidStateRoot(Mismatch), + /// TODO [arkpar] Please document me InvalidGasUsed(Mismatch), + /// TODO [arkpar] Please document me InvalidTransactionsRoot(Mismatch), + /// TODO [arkpar] Please document me InvalidDifficulty(Mismatch), + /// TODO [arkpar] Please document me InvalidGasLimit(OutOfBounds), + /// TODO [arkpar] Please document me InvalidReceiptsStateRoot(Mismatch), + /// TODO [arkpar] Please document me InvalidTimestamp(OutOfBounds), + /// TODO [arkpar] Please document me InvalidLogBloom(Mismatch), + /// TODO [arkpar] Please document me InvalidEthashDifficulty(Mismatch), + /// TODO [arkpar] Please document me InvalidBlockNonce(Mismatch), + /// TODO [arkpar] Please document me InvalidParentHash(Mismatch), + /// TODO [arkpar] Please document me InvalidNumber(OutOfBounds), + /// TODO [arkpar] Please document me UnknownParent(H256), + /// TODO [Gav Wood] Please document me UnknownUncleParent(H256), } #[derive(Debug)] +/// TODO [arkpar] Please document me pub enum ImportError { + /// TODO [arkpar] Please document me Bad(Option), + /// TODO [arkpar] Please document me AlreadyInChain, + /// TODO [arkpar] Please document me AlreadyQueued, } @@ -89,10 +150,15 @@ pub type ImportResult = Result<(), ImportError>; #[derive(Debug)] /// General error type which should be capable of representing all errors in ethcore. 
pub enum Error { + /// TODO [Gav Wood] Please document me Util(UtilError), + /// TODO [Gav Wood] Please document me Block(BlockError), + /// TODO [Gav Wood] Please document me UnknownEngineName(String), + /// TODO [Gav Wood] Please document me Execution(ExecutionError), + /// TODO [Gav Wood] Please document me Transaction(TransactionError), } diff --git a/src/ethereum/denominations.rs b/src/ethereum/denominations.rs index a23c94b4c..093be89a7 100644 --- a/src/ethereum/denominations.rs +++ b/src/ethereum/denominations.rs @@ -1,17 +1,22 @@ use util::*; #[inline] +/// TODO [debris] Please document me pub fn ether() -> U256 { U256::exp10(18) } #[inline] +/// TODO [debris] Please document me pub fn finney() -> U256 { U256::exp10(15) } #[inline] +/// TODO [debris] Please document me pub fn szabo() -> U256 { U256::exp10(12) } #[inline] +/// TODO [debris] Please document me pub fn shannon() -> U256 { U256::exp10(9) } #[inline] +/// TODO [debris] Please document me pub fn wei() -> U256 { U256::exp10(0) } diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index f559446ce..ecfdb0541 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -19,6 +19,7 @@ pub struct Ethash { } impl Ethash { + /// TODO [arkpar] Please document me pub fn new_boxed(spec: Spec) -> Box { Box::new(Ethash { spec: spec, diff --git a/src/ethereum/mod.rs b/src/ethereum/mod.rs index e97ac79a3..66bb7f356 100644 --- a/src/ethereum/mod.rs +++ b/src/ethereum/mod.rs @@ -3,7 +3,9 @@ //! Contains all Ethereum network specific stuff, such as denominations and //! consensus specifications. +/// TODO [Gav Wood] Please document me pub mod ethash; +/// TODO [Gav Wood] Please document me pub mod denominations; pub use self::ethash::*; diff --git a/src/evm/evm.rs b/src/evm/evm.rs index 696d49474..b90ea5f97 100644 --- a/src/evm/evm.rs +++ b/src/evm/evm.rs @@ -15,23 +15,31 @@ pub enum Error { /// `BadJumpDestination` is returned when execution tried to move /// to position that wasn't marked with JUMPDEST instruction BadJumpDestination { + /// TODO [Tomusdrw] Please document me destination: usize }, /// `BadInstructions` is returned when given instruction is not supported BadInstruction { + /// TODO [Tomusdrw] Please document me instruction: u8, }, /// `StackUnderflow` when there is not enough stack elements to execute instruction /// First parameter says how many elements were needed and the second how many were actually on Stack StackUnderflow { + /// TODO [Tomusdrw] Please document me instruction: &'static str, + /// TODO [Tomusdrw] Please document me wanted: usize, + /// TODO [Tomusdrw] Please document me on_stack: usize }, /// When execution would exceed defined Stack Limit OutOfStack { + /// TODO [Tomusdrw] Please document me instruction: &'static str, + /// TODO [Tomusdrw] Please document me wanted: usize, + /// TODO [Tomusdrw] Please document me limit: usize }, /// Returned on evm internal error. Should never be ignored during development. diff --git a/src/evm/ext.rs b/src/evm/ext.rs index 924db7a71..4d2471593 100644 --- a/src/evm/ext.rs +++ b/src/evm/ext.rs @@ -26,6 +26,7 @@ pub enum MessageCallResult { Failed } +/// TODO [debris] Please document me pub trait Ext { /// Returns a value for given key. 
fn storage_at(&self, key: &H256) -> H256; diff --git a/src/evm/factory.rs b/src/evm/factory.rs index 3dde4bb6d..439bee5da 100644 --- a/src/evm/factory.rs +++ b/src/evm/factory.rs @@ -3,8 +3,11 @@ use std::fmt; use evm::Evm; #[derive(Clone)] +/// TODO [Tomusdrw] Please document me pub enum VMType { + /// TODO [Tomusdrw] Please document me Jit, + /// TODO [Tomusdrw] Please document me Interpreter } diff --git a/src/evm/mod.rs b/src/evm/mod.rs index 2ed9a1146..1426bc281 100644 --- a/src/evm/mod.rs +++ b/src/evm/mod.rs @@ -2,6 +2,7 @@ pub mod ext; pub mod evm; +/// TODO [Tomusdrw] Please document me pub mod interpreter; #[macro_use] pub mod factory; diff --git a/src/evm/schedule.rs b/src/evm/schedule.rs index f820f85e7..70edfceea 100644 --- a/src/evm/schedule.rs +++ b/src/evm/schedule.rs @@ -2,36 +2,67 @@ /// Definition of the cost schedule and other parameterisations for the EVM. pub struct Schedule { + /// TODO [Gav Wood] Please document me pub exceptional_failed_code_deposit: bool, + /// TODO [Gav Wood] Please document me pub have_delegate_call: bool, + /// TODO [Tomusdrw] Please document me pub stack_limit: usize, + /// TODO [Gav Wood] Please document me pub max_depth: usize, + /// TODO [Gav Wood] Please document me pub tier_step_gas: [usize; 8], + /// TODO [Gav Wood] Please document me pub exp_gas: usize, + /// TODO [Gav Wood] Please document me pub exp_byte_gas: usize, + /// TODO [Gav Wood] Please document me pub sha3_gas: usize, + /// TODO [Gav Wood] Please document me pub sha3_word_gas: usize, + /// TODO [Gav Wood] Please document me pub sload_gas: usize, + /// TODO [Gav Wood] Please document me pub sstore_set_gas: usize, + /// TODO [Gav Wood] Please document me pub sstore_reset_gas: usize, + /// TODO [Gav Wood] Please document me pub sstore_refund_gas: usize, + /// TODO [Gav Wood] Please document me pub jumpdest_gas: usize, + /// TODO [Gav Wood] Please document me pub log_gas: usize, + /// TODO [Gav Wood] Please document me pub log_data_gas: usize, + /// TODO [Gav Wood] Please document me pub log_topic_gas: usize, + /// TODO [Gav Wood] Please document me pub create_gas: usize, + /// TODO [Gav Wood] Please document me pub call_gas: usize, + /// TODO [Gav Wood] Please document me pub call_stipend: usize, + /// TODO [Gav Wood] Please document me pub call_value_transfer_gas: usize, + /// TODO [Gav Wood] Please document me pub call_new_account_gas: usize, + /// TODO [Gav Wood] Please document me pub suicide_refund_gas: usize, + /// TODO [Gav Wood] Please document me pub memory_gas: usize, + /// TODO [Gav Wood] Please document me pub quad_coeff_div: usize, + /// TODO [Gav Wood] Please document me pub create_data_gas: usize, + /// TODO [Gav Wood] Please document me pub tx_gas: usize, + /// TODO [Gav Wood] Please document me pub tx_create_gas: usize, + /// TODO [Gav Wood] Please document me pub tx_data_zero_gas: usize, + /// TODO [Gav Wood] Please document me pub tx_data_non_zero_gas: usize, + /// TODO [Gav Wood] Please document me pub copy_gas: usize, } diff --git a/src/extras.rs b/src/extras.rs index 8052af791..fb4748447 100644 --- a/src/extras.rs +++ b/src/extras.rs @@ -5,15 +5,21 @@ use rocksdb::{DB, Writable}; /// Represents index of extra data in database #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] pub enum ExtrasIndex { + /// TODO [debris] Please document me BlockDetails = 0, + /// TODO [debris] Please document me BlockHash = 1, + /// TODO [debris] Please document me TransactionAddress = 2, + /// TODO [debris] Please document me BlockLogBlooms = 3, + /// TODO [debris] Please 
document me BlocksBlooms = 4 } /// trait used to write Extras data to db pub trait ExtrasWritable { + /// TODO [debris] Please document me fn put_extras(&self, hash: &K, value: &T) where T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable; @@ -21,10 +27,12 @@ pub trait ExtrasWritable { /// trait used to read Extras data from db pub trait ExtrasReadable { + /// TODO [debris] Please document me fn get_extras(&self, hash: &K) -> Option where T: ExtrasIndexable + Decodable, K: ExtrasSliceConvertable; + /// TODO [debris] Please document me fn extras_exists(&self, hash: &K) -> bool where T: ExtrasIndexable, K: ExtrasSliceConvertable; @@ -58,7 +66,9 @@ impl ExtrasReadable for DB { /// Implementations should convert arbitrary type to database key slice pub trait ExtrasSliceConvertable { + /// TODO [Gav Wood] Please document me fn to_extras_slice(&self, i: ExtrasIndex) -> H264; + /// TODO [debris] Please document me fn as_h256(&self) -> Option<&H256> { None } } @@ -86,6 +96,7 @@ impl ExtrasSliceConvertable for BlockNumber { /// Types implementing this trait can be indexed in extras database pub trait ExtrasIndexable { + /// TODO [debris] Please document me fn extras_index() -> ExtrasIndex; } @@ -98,9 +109,13 @@ impl ExtrasIndexable for H256 { /// Familial details concerning a block #[derive(Debug, Clone)] pub struct BlockDetails { + /// TODO [debris] Please document me pub number: BlockNumber, + /// TODO [debris] Please document me pub total_difficulty: U256, + /// TODO [debris] Please document me pub parent: H256, + /// TODO [debris] Please document me pub children: Vec } @@ -143,6 +158,7 @@ impl Encodable for BlockDetails { /// Log blooms of certain block #[derive(Clone)] pub struct BlockLogBlooms { + /// TODO [debris] Please document me pub blooms: Vec } @@ -176,6 +192,7 @@ impl Encodable for BlockLogBlooms { /// Neighboring log blooms on certain level pub struct BlocksBlooms { + /// TODO [debris] Please document me pub blooms: [H2048; 16] } @@ -223,7 +240,9 @@ impl Encodable for BlocksBlooms { /// Represents address of certain transaction within block #[derive(Clone)] pub struct TransactionAddress { + /// TODO [debris] Please document me pub block_hash: H256, + /// TODO [debris] Please document me pub index: u64 } diff --git a/src/header.rs b/src/header.rs index 0986a0d80..28ed458fb 100644 --- a/src/header.rs +++ b/src/header.rs @@ -2,6 +2,7 @@ use util::*; use basic_types::*; use time::now_utc; +/// TODO [Gav Wood] Please document me pub type BlockNumber = u64; /// A block header. @@ -13,25 +14,41 @@ pub type BlockNumber = u64; #[derive(Debug, Clone)] pub struct Header { // TODO: make all private. 
+ /// TODO [Gav Wood] Please document me pub parent_hash: H256, + /// TODO [arkpar] Please document me pub timestamp: u64, + /// TODO [debris] Please document me pub number: BlockNumber, + /// TODO [Gav Wood] Please document me pub author: Address, + /// TODO [debris] Please document me pub transactions_root: H256, + /// TODO [debris] Please document me pub uncles_hash: H256, + /// TODO [Gav Wood] Please document me pub extra_data: Bytes, + /// TODO [debris] Please document me pub state_root: H256, + /// TODO [debris] Please document me pub receipts_root: H256, + /// TODO [debris] Please document me pub log_bloom: LogBloom, + /// TODO [debris] Please document me pub gas_used: U256, + /// TODO [Gav Wood] Please document me pub gas_limit: U256, + /// TODO [debris] Please document me pub difficulty: U256, + /// TODO [arkpar] Please document me pub seal: Vec, + /// TODO [arkpar] Please document me pub hash: RefCell>, + /// TODO [Gav Wood] Please document me pub bare_hash: RefCell>, } @@ -61,32 +78,50 @@ impl Header { } } + /// TODO [Gav Wood] Please document me pub fn number(&self) -> BlockNumber { self.number } + /// TODO [Gav Wood] Please document me pub fn timestamp(&self) -> u64 { self.timestamp } + /// TODO [Gav Wood] Please document me pub fn author(&self) -> &Address { &self.author } + /// TODO [Gav Wood] Please document me pub fn extra_data(&self) -> &Bytes { &self.extra_data } + /// TODO [Gav Wood] Please document me pub fn state_root(&self) -> &H256 { &self.state_root } + /// TODO [Gav Wood] Please document me pub fn receipts_root(&self) -> &H256 { &self.receipts_root } + /// TODO [Gav Wood] Please document me pub fn gas_limit(&self) -> &U256 { &self.gas_limit } + /// TODO [Gav Wood] Please document me pub fn difficulty(&self) -> &U256 { &self.difficulty } + /// TODO [Gav Wood] Please document me pub fn seal(&self) -> &Vec { &self.seal } // TODO: seal_at, set_seal_at &c. + /// TODO [Gav Wood] Please document me pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_timestamp_now(&mut self) { self.timestamp = now_utc().to_timespec().sec as u64; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_author(&mut self, a: Address) { if a != self.author { self.author = a; self.note_dirty(); } } + /// TODO [Gav Wood] Please document me pub fn set_extra_data(&mut self, a: Bytes) { if a != self.extra_data { self.extra_data = a; self.note_dirty(); } } + /// TODO [Gav Wood] Please document me pub fn set_gas_used(&mut self, a: U256) { self.gas_used = a; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_gas_limit(&mut self, a: U256) { self.gas_limit = a; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_difficulty(&mut self, a: U256) { self.difficulty = a; self.note_dirty(); } + /// TODO [Gav Wood] Please document me pub fn set_seal(&mut self, a: Vec) { self.seal = a; self.note_dirty(); } /// Get the hash of this header (sha3 of the RLP). 
@@ -120,6 +155,7 @@ impl Header { } // TODO: make these functions traity + /// TODO [Gav Wood] Please document me pub fn stream_rlp(&self, s: &mut RlpStream, with_seal: Seal) { s.append_list(13 + match with_seal { Seal::With => self.seal.len(), _ => 0 }); s.append(&self.parent_hash); @@ -141,12 +177,14 @@ impl Header { } } + /// TODO [Gav Wood] Please document me pub fn rlp(&self, with_seal: Seal) -> Bytes { let mut s = RlpStream::new(); self.stream_rlp(&mut s, with_seal); s.out() } + /// TODO [debris] Please document me pub fn rlp_sha3(&self, with_seal: Seal) -> H256 { self.rlp(with_seal).sha3() } } diff --git a/src/lib.rs b/src/lib.rs index 20c59eafa..a5b6c3dae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +#![warn(missing_docs)] #![feature(cell_extras)] #![feature(augmented_assignments)] //#![feature(plugin)] @@ -88,31 +89,51 @@ extern crate evmjit; #[macro_use] extern crate ethcore_util as util; +/// TODO [Gav Wood] Please document me pub mod common; +/// TODO [Tomusdrw] Please document me pub mod basic_types; #[macro_use] pub mod evm; pub mod error; +/// TODO [Gav Wood] Please document me pub mod log_entry; +/// TODO [Gav Wood] Please document me pub mod env_info; +/// TODO [Gav Wood] Please document me pub mod pod_account; +/// TODO [Gav Wood] Please document me pub mod pod_state; +/// TODO [Gav Wood] Please document me pub mod account_diff; +/// TODO [Gav Wood] Please document me pub mod state_diff; +/// TODO [Gav Wood] Please document me pub mod engine; +/// TODO [Gav Wood] Please document me pub mod state; +/// TODO [Gav Wood] Please document me pub mod account; pub mod action_params; +/// TODO [debris] Please document me pub mod header; +/// TODO [Gav Wood] Please document me pub mod transaction; +/// TODO [Gav Wood] Please document me pub mod receipt; +/// TODO [Gav Wood] Please document me pub mod null_engine; +/// TODO [Gav Wood] Please document me pub mod builtin; +/// TODO [debris] Please document me pub mod spec; pub mod views; pub mod blockchain; +/// TODO [Gav Wood] Please document me pub mod extras; +/// TODO [arkpar] Please document me pub mod substate; +/// TODO [Gav Wood] Please document me pub mod service; pub mod executive; pub mod externalities; @@ -120,9 +141,14 @@ pub mod externalities; #[cfg(test)] mod tests; +/// TODO [arkpar] Please document me pub mod client; +/// TODO [arkpar] Please document me pub mod sync; +/// TODO [arkpar] Please document me pub mod block; +/// TODO [arkpar] Please document me pub mod verification; +/// TODO [debris] Please document me pub mod queue; pub mod ethereum; diff --git a/src/log_entry.rs b/src/log_entry.rs index cd4353874..a791b38a6 100644 --- a/src/log_entry.rs +++ b/src/log_entry.rs @@ -4,8 +4,11 @@ use basic_types::LogBloom; /// A single log's entry. #[derive(Debug,PartialEq,Eq)] pub struct LogEntry { + /// TODO [Gav Wood] Please document me pub address: Address, + /// TODO [Gav Wood] Please document me pub topics: Vec, + /// TODO [Gav Wood] Please document me pub data: Bytes, } diff --git a/src/null_engine.rs b/src/null_engine.rs index e673563b3..3b03508a2 100644 --- a/src/null_engine.rs +++ b/src/null_engine.rs @@ -11,6 +11,7 @@ pub struct NullEngine { } impl NullEngine { + /// TODO [Tomusdrw] Please document me pub fn new_boxed(spec: Spec) -> Box { Box::new(NullEngine{ spec: spec, diff --git a/src/pod_account.rs b/src/pod_account.rs index 29b43c0bf..81b8b1c44 100644 --- a/src/pod_account.rs +++ b/src/pod_account.rs @@ -4,9 +4,13 @@ use account::*; #[derive(Debug,Clone,PartialEq,Eq)] /// Genesis account data. 
Does not have a DB overlay cache. pub struct PodAccount { + /// TODO [Gav Wood] Please document me pub balance: U256, + /// TODO [Gav Wood] Please document me pub nonce: U256, + /// TODO [Gav Wood] Please document me pub code: Bytes, + /// TODO [Gav Wood] Please document me pub storage: BTreeMap, } @@ -27,6 +31,7 @@ impl PodAccount { } } + /// TODO [Gav Wood] Please document me pub fn rlp(&self) -> Bytes { let mut stream = RlpStream::new_list(4); stream.append(&self.nonce); diff --git a/src/pod_state.rs b/src/pod_state.rs index e3802c42a..2ab27ef1d 100644 --- a/src/pod_state.rs +++ b/src/pod_state.rs @@ -2,6 +2,7 @@ use util::*; use pod_account::*; #[derive(Debug,Clone,PartialEq,Eq)] +/// TODO [Gav Wood] Please document me pub struct PodState (BTreeMap); impl PodState { diff --git a/src/queue.rs b/src/queue.rs index 5803b3e5f..7c74b56d7 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -204,6 +204,7 @@ impl BlockQueue { verification.verified = new_verified; } + /// TODO [arkpar] Please document me pub fn drain(&mut self, max: usize) -> Vec { let mut verification = self.verification.lock().unwrap(); let count = min(max, verification.verified.len()); diff --git a/src/receipt.rs b/src/receipt.rs index 21a66f5cf..82620cfcd 100644 --- a/src/receipt.rs +++ b/src/receipt.rs @@ -5,13 +5,18 @@ use log_entry::LogEntry; /// Information describing execution of a transaction. #[derive(Debug)] pub struct Receipt { + /// TODO [Gav Wood] Please document me pub state_root: H256, + /// TODO [Gav Wood] Please document me pub gas_used: U256, + /// TODO [Gav Wood] Please document me pub log_bloom: LogBloom, + /// TODO [Gav Wood] Please document me pub logs: Vec, } impl Receipt { + /// TODO [Gav Wood] Please document me pub fn new(state_root: H256, gas_used: U256, logs: Vec) -> Receipt { Receipt { state_root: state_root, diff --git a/src/service.rs b/src/service.rs index 036c99bc4..30565b37a 100644 --- a/src/service.rs +++ b/src/service.rs @@ -33,10 +33,12 @@ impl ClientService { }) } + /// TODO [arkpar] Please document me pub fn io(&mut self) -> &mut IoService { self.net_service.io() } + /// TODO [arkpar] Please document me pub fn client(&self) -> Arc> { self.client.clone() } diff --git a/src/spec.rs b/src/spec.rs index e93b460c8..24c0e4eda 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -51,6 +51,7 @@ pub struct GenesisAccount { } impl GenesisAccount { + /// TODO [arkpar] Please document me pub fn rlp(&self) -> Bytes { let mut stream = RlpStream::new_list(4); stream.append(&self.nonce); @@ -66,27 +67,41 @@ impl GenesisAccount { #[derive(Debug)] pub struct Spec { // User friendly spec name + /// TODO [Gav Wood] Please document me pub name: String, // What engine are we using for this? + /// TODO [Gav Wood] Please document me pub engine_name: String, // Parameters concerning operation of the specific engine we're using. // Name -> RLP-encoded value + /// TODO [Gav Wood] Please document me pub engine_params: HashMap, // Builtin-contracts are here for now but would like to abstract into Engine API eventually. + /// TODO [Gav Wood] Please document me pub builtins: HashMap, // Genesis params. 
+ /// TODO [Gav Wood] Please document me pub parent_hash: H256, + /// TODO [Gav Wood] Please document me pub author: Address, + /// TODO [Gav Wood] Please document me pub difficulty: U256, + /// TODO [Gav Wood] Please document me pub gas_limit: U256, + /// TODO [Gav Wood] Please document me pub gas_used: U256, + /// TODO [Gav Wood] Please document me pub timestamp: u64, + /// TODO [arkpar] Please document me pub extra_data: Bytes, + /// TODO [Gav Wood] Please document me pub genesis_state: HashMap, + /// TODO [Gav Wood] Please document me pub seal_fields: usize, + /// TODO [Gav Wood] Please document me pub seal_rlp: Bytes, // May be prepopulated if we know this in advance. @@ -112,6 +127,7 @@ impl Spec { self.state_root_memo.read().unwrap().as_ref().unwrap().clone() } + /// TODO [Gav Wood] Please document me pub fn genesis_header(&self) -> Header { Header { parent_hash: self.parent_hash.clone(), diff --git a/src/state.rs b/src/state.rs index e325b8d34..e45347fd2 100644 --- a/src/state.rs +++ b/src/state.rs @@ -5,6 +5,7 @@ use pod_account::*; use pod_state::*; use state_diff::*; +/// TODO [Gav Wood] Please document me pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. @@ -157,6 +158,7 @@ impl State { Ok(receipt) } + /// TODO [debris] Please document me pub fn revert(&mut self, backup: State) { self.cache = backup.cache; } diff --git a/src/state_diff.rs b/src/state_diff.rs index d603dda5e..28d92e59e 100644 --- a/src/state_diff.rs +++ b/src/state_diff.rs @@ -3,6 +3,7 @@ use pod_state::*; use account_diff::*; #[derive(Debug,Clone,PartialEq,Eq)] +/// TODO [Gav Wood] Please document me pub struct StateDiff (BTreeMap); impl StateDiff { diff --git a/src/substate.rs b/src/substate.rs index 9a1d6741e..5c4cde60c 100644 --- a/src/substate.rs +++ b/src/substate.rs @@ -25,6 +25,7 @@ impl Substate { } } + /// TODO [Gav Wood] Please document me pub fn accrue(&mut self, s: Substate) { self.suicides.extend(s.suicides.into_iter()); self.logs.extend(s.logs.into_iter()); diff --git a/src/sync/mod.rs b/src/sync/mod.rs index da91a6889..1ebc34d28 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -46,6 +46,7 @@ pub enum SyncMessage { BlockVerified, } +/// TODO [arkpar] Please document me pub type NetSyncMessage = NetworkIoMessage; /// Ethereum network protocol handler diff --git a/src/transaction.rs b/src/transaction.rs index 4f547a243..081870e4b 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -4,8 +4,11 @@ use error::*; use evm::Schedule; #[derive(Debug,Clone)] +/// TODO [Gav Wood] Please document me pub enum Action { + /// TODO [Gav Wood] Please document me Create, + /// TODO [debris] Please document me Call(Address), } @@ -13,16 +16,25 @@ pub enum Action { /// or contract creation operation. 
#[derive(Debug,Clone)] pub struct Transaction { + /// TODO [debris] Please document me pub nonce: U256, + /// TODO [debris] Please document me pub gas_price: U256, + /// TODO [debris] Please document me pub gas: U256, + /// TODO [debris] Please document me pub action: Action, + /// TODO [debris] Please document me pub value: U256, + /// TODO [Gav Wood] Please document me pub data: Bytes, // signature + /// TODO [Gav Wood] Please document me pub v: u8, + /// TODO [Gav Wood] Please document me pub r: U256, + /// TODO [debris] Please document me pub s: U256, hash: RefCell>, @@ -30,6 +42,7 @@ pub struct Transaction { } impl Transaction { + /// TODO [Gav Wood] Please document me pub fn new() -> Self { Transaction { nonce: x!(0), diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 479a91df0..8ee98441b 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -43,6 +43,7 @@ use std::ops::{Deref, DerefMut}; use uint::{Uint, U128, U256}; use hash::FixedHash; +/// TODO [Gav Wood] Please document me pub struct PrettySlice<'a> (&'a [u8]); impl<'a> fmt::Debug for PrettySlice<'a> { @@ -66,8 +67,11 @@ impl<'a> fmt::Display for PrettySlice<'a> { } } +/// TODO [Gav Wood] Please document me pub trait ToPretty { + /// TODO [Gav Wood] Please document me fn pretty(&self) -> PrettySlice; + /// TODO [Gav Wood] Please document me fn to_hex(&self) -> String { format!("{}", self.pretty()) } @@ -90,8 +94,11 @@ impl ToPretty for Bytes { } } +/// TODO [debris] Please document me pub enum BytesRef<'a> { + /// TODO [debris] Please document me Flexible(&'a mut Bytes), + /// TODO [debris] Please document me Fixed(&'a mut [u8]) } @@ -121,8 +128,11 @@ pub type Bytes = Vec; /// Slice of bytes to underlying memory pub trait BytesConvertable { // TODO: rename to as_slice + /// TODO [Gav Wood] Please document me fn bytes(&self) -> &[u8]; + /// TODO [Gav Wood] Please document me fn as_slice(&self) -> &[u8] { self.bytes() } + /// TODO [Gav Wood] Please document me fn to_bytes(&self) -> Bytes { self.as_slice().to_vec() } } @@ -160,8 +170,11 @@ fn bytes_convertable() { /// /// TODO: optimise some conversations pub trait ToBytes { + /// TODO [Gav Wood] Please document me fn to_bytes(&self) -> Vec; + /// TODO [Gav Wood] Please document me fn to_bytes_len(&self) -> usize { self.to_bytes().len() } + /// TODO [debris] Please document me fn first_byte(&self) -> Option { self.to_bytes().first().map(|&x| { x })} } @@ -257,7 +270,9 @@ impl ToBytes for T where T: FixedHash { /// Error returned when FromBytes conversation goes wrong #[derive(Debug, PartialEq, Eq)] pub enum FromBytesError { + /// TODO [debris] Please document me DataIsTooShort, + /// TODO [debris] Please document me DataIsTooLong } @@ -278,6 +293,7 @@ pub type FromBytesResult = Result; /// /// TODO: check size of bytes before conversation and return appropriate error pub trait FromBytes: Sized { + /// TODO [debris] Please document me fn from_bytes(bytes: &[u8]) -> FromBytesResult; } diff --git a/util/src/chainfilter.rs b/util/src/chainfilter.rs index e1804c191..41fce8521 100644 --- a/util/src/chainfilter.rs +++ b/util/src/chainfilter.rs @@ -49,7 +49,9 @@ use sha3::*; /// index. Their `BloomIndex` can be created from block number and given level. 
#[derive(Eq, PartialEq, Hash, Clone, Debug)] pub struct BloomIndex { + /// TODO [debris] Please document me pub level: u8, + /// TODO [debris] Please document me pub index: usize, } diff --git a/util/src/common.rs b/util/src/common.rs index 3ec02ad9b..0f10a4e97 100644 --- a/util/src/common.rs +++ b/util/src/common.rs @@ -46,6 +46,7 @@ macro_rules! flushln { ($fmt:expr, $($arg:tt)*) => (flush!(concat!($fmt, "\n"), $($arg)*)); } +/// TODO [Gav Wood] Please document me pub fn flush(s: String) { ::std::io::stdout().write(s.as_bytes()).unwrap(); ::std::io::stdout().flush().unwrap(); diff --git a/util/src/crypto.rs b/util/src/crypto.rs index e9e440024..79a952a94 100644 --- a/util/src/crypto.rs +++ b/util/src/crypto.rs @@ -4,8 +4,11 @@ use uint::*; use secp256k1::{key, Secp256k1}; use rand::os::OsRng; +/// TODO [Gav Wood] Please document me pub type Secret = H256; +/// TODO [Gav Wood] Please document me pub type Public = H512; +/// TODO [Gav Wood] Please document me pub type Signature = H520; lazy_static! { @@ -33,11 +36,17 @@ impl Signature { } #[derive(Debug)] +/// TODO [arkpar] Please document me pub enum CryptoError { + /// TODO [arkpar] Please document me InvalidSecret, + /// TODO [arkpar] Please document me InvalidPublic, + /// TODO [arkpar] Please document me InvalidSignature, + /// TODO [arkpar] Please document me InvalidMessage, + /// TODO [arkpar] Please document me Io(::std::io::Error), } @@ -122,6 +131,7 @@ impl KeyPair { pub fn sign(&self, message: &H256) -> Result { ec::sign(&self.secret, message) } } +/// TODO [arkpar] Please document me pub mod ec { use hash::*; use uint::*; @@ -198,10 +208,12 @@ pub mod ec { } } +/// TODO [arkpar] Please document me pub mod ecdh { use crypto::*; use crypto::{self}; + /// TODO [arkpar] Please document me pub fn agree(secret: &Secret, public: &Public, ) -> Result { use secp256k1::*; let context = &crypto::SECP256K1; @@ -217,11 +229,13 @@ pub mod ecdh { } } +/// TODO [arkpar] Please document me pub mod ecies { use hash::*; use bytes::*; use crypto::*; + /// TODO [arkpar] Please document me pub fn encrypt(public: &Public, plain: &[u8]) -> Result { use ::rcrypto::digest::Digest; use ::rcrypto::sha2::Sha256; @@ -257,6 +271,7 @@ pub mod ecies { Ok(msg) } + /// TODO [arkpar] Please document me pub fn decrypt(secret: &Secret, encrypted: &[u8]) -> Result { use ::rcrypto::digest::Digest; use ::rcrypto::sha2::Sha256; @@ -322,17 +337,20 @@ pub mod ecies { } } +/// TODO [arkpar] Please document me pub mod aes { use ::rcrypto::blockmodes::*; use ::rcrypto::aessafe::*; use ::rcrypto::symmetriccipher::*; use ::rcrypto::buffer::*; + /// TODO [arkpar] Please document me pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) { let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); } + /// TODO [arkpar] Please document me pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) { let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); diff --git a/util/src/error.rs b/util/src/error.rs index d9687183d..0d13d329c 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -6,23 +6,36 @@ use rlp::DecoderError; use io; #[derive(Debug)] +/// TODO [Gav Wood] Please document me pub enum BaseDataError { + /// TODO [Gav Wood] Please document me 
NegativelyReferencedHash, } #[derive(Debug)] /// General error type which should be capable of representing all errors in ethcore. pub enum UtilError { + /// TODO [Gav Wood] Please document me Crypto(::crypto::CryptoError), + /// TODO [Gav Wood] Please document me StdIo(::std::io::Error), + /// TODO [Gav Wood] Please document me Io(io::IoError), + /// TODO [Gav Wood] Please document me AddressParse(::std::net::AddrParseError), + /// TODO [Gav Wood] Please document me AddressResolve(Option<::std::io::Error>), + /// TODO [Gav Wood] Please document me FromHex(FromHexError), + /// TODO [Gav Wood] Please document me BaseData(BaseDataError), + /// TODO [Gav Wood] Please document me Network(NetworkError), + /// TODO [Gav Wood] Please document me Decoder(DecoderError), + /// TODO [Gav Wood] Please document me SimpleString(String), + /// TODO [Gav Wood] Please document me BadSize, } diff --git a/util/src/from_json.rs b/util/src/from_json.rs index 71a3f50e3..1d95df691 100644 --- a/util/src/from_json.rs +++ b/util/src/from_json.rs @@ -7,6 +7,8 @@ macro_rules! xjson { } } +/// TODO [Gav Wood] Please document me pub trait FromJson { + /// TODO [Gav Wood] Please document me fn from_json(json: &Json) -> Self; } diff --git a/util/src/hash.rs b/util/src/hash.rs index 17057ef07..352efed7d 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -13,20 +13,33 @@ use uint::{Uint, U256}; /// /// Note: types implementing `FixedHash` must be also `BytesConvertable`. pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default { + /// TODO [Gav Wood] Please document me fn new() -> Self; /// Synonym for `new()`. Prefer to new as it's more readable. fn zero() -> Self; + /// TODO [debris] Please document me fn random() -> Self; + /// TODO [debris] Please document me fn randomize(&mut self); + /// TODO [arkpar] Please document me fn size() -> usize; + /// TODO [arkpar] Please document me fn from_slice(src: &[u8]) -> Self; + /// TODO [arkpar] Please document me fn clone_from_slice(&mut self, src: &[u8]) -> usize; + /// TODO [Gav Wood] Please document me fn copy_to(&self, dest: &mut [u8]); + /// TODO [Gav Wood] Please document me fn shift_bloomed<'a, T>(&'a mut self, b: &T) -> &'a mut Self where T: FixedHash; + /// TODO [debris] Please document me fn with_bloomed(mut self, b: &T) -> Self where T: FixedHash { self.shift_bloomed(b); self } + /// TODO [Gav Wood] Please document me fn bloom_part(&self, m: usize) -> T where T: FixedHash; + /// TODO [debris] Please document me fn contains_bloomed(&self, b: &T) -> bool where T: FixedHash; + /// TODO [arkpar] Please document me fn contains<'a>(&'a self, b: &'a Self) -> bool; + /// TODO [debris] Please document me fn is_zero(&self) -> bool; } @@ -41,6 +54,7 @@ fn clean_0x(s: &str) -> &str { macro_rules! impl_hash { ($from: ident, $size: expr) => { #[derive(Eq)] + /// TODO [Gav Wood] Please document me pub struct $from (pub [u8; $size]); impl BytesConvertable for $from { @@ -396,10 +410,12 @@ macro_rules! 
impl_hash { } impl $from { + /// TODO [Gav Wood] Please document me pub fn hex(&self) -> String { format!("{:?}", self) } + /// TODO [Gav Wood] Please document me pub fn from_bloomed(b: &T) -> Self where T: FixedHash { b.bloom_part($size) } } @@ -503,21 +519,25 @@ impl<'_> From<&'_ Address> for H256 { } } +/// TODO [Gav Wood] Please document me pub fn h256_from_hex(s: &str) -> H256 { use std::str::FromStr; H256::from_str(s).unwrap() } +/// TODO [Gav Wood] Please document me pub fn h256_from_u64(n: u64) -> H256 { use uint::U256; H256::from(&U256::from(n)) } +/// TODO [Gav Wood] Please document me pub fn address_from_hex(s: &str) -> Address { use std::str::FromStr; Address::from_str(s).unwrap() } +/// TODO [Gav Wood] Please document me pub fn address_from_u64(n: u64) -> Address { let h256 = h256_from_u64(n); From::from(h256) diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 23a8509cc..c59de0584 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -38,7 +38,9 @@ mod service; #[derive(Debug)] +/// TODO [arkpar] Please document me pub enum IoError { + /// TODO [arkpar] Please document me Mio(::std::io::Error), } @@ -66,10 +68,15 @@ pub trait IoHandler: Send where Message: Send + 'static { fn stream_writable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} } +/// TODO [arkpar] Please document me pub type TimerToken = service::TimerToken; +/// TODO [arkpar] Please document me pub type StreamToken = service::StreamToken; +/// TODO [arkpar] Please document me pub type IoContext<'s, M> = service::IoContext<'s, M>; +/// TODO [arkpar] Please document me pub type IoService = service::IoService; +/// TODO [arkpar] Please document me pub type IoChannel = service::IoChannel; //pub const USER_TOKEN_START: usize = service::USER_TOKEN; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12) diff --git a/util/src/json_aid.rs b/util/src/json_aid.rs index 79a71cac6..595484908 100644 --- a/util/src/json_aid.rs +++ b/util/src/json_aid.rs @@ -1,5 +1,6 @@ use common::*; +/// TODO [Gav Wood] Please document me pub fn clean(s: &str) -> &str { if s.len() >= 2 && &s[0..2] == "0x" { &s[2..] 
diff --git a/util/src/lib.rs b/util/src/lib.rs index 4b50a0f9b..1cfa11657 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,3 +1,4 @@ +#![warn(missing_docs)] #![feature(op_assign_traits)] #![feature(augmented_assignments)] #![feature(associated_consts)] @@ -51,17 +52,22 @@ extern crate secp256k1; extern crate arrayvec; extern crate elastic_array; +/// TODO [Gav Wood] Please document me pub mod standard; #[macro_use] +/// TODO [Gav Wood] Please document me pub mod from_json; #[macro_use] +/// TODO [Gav Wood] Please document me pub mod common; pub mod error; pub mod hash; pub mod uint; pub mod bytes; pub mod rlp; +/// TODO [Gav Wood] Please document me pub mod misc; +/// TODO [Gav Wood] Please document me pub mod json_aid; pub mod vector; pub mod sha3; @@ -69,16 +75,23 @@ pub mod hashdb; pub mod memorydb; pub mod overlaydb; pub mod journaldb; +/// TODO [Gav Wood] Please document me pub mod math; pub mod chainfilter; +/// TODO [Gav Wood] Please document me pub mod crypto; pub mod triehash; +/// TODO [Gav Wood] Please document me pub mod trie; pub mod nibbleslice; +/// TODO [Gav Wood] Please document me pub mod heapsizeof; pub mod squeeze; +/// TODO [Gav Wood] Please document me pub mod semantic_version; +/// TODO [Gav Wood] Please document me pub mod io; +/// TODO [Gav Wood] Please document me pub mod network; pub use common::*; diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 6f2b2e603..f2935cceb 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -107,12 +107,14 @@ impl MemoryDB { self.data.get(key) } + /// TODO [Gav Wood] Please document me pub fn drain(&mut self) -> HashMap { let mut data = HashMap::new(); mem::swap(&mut self.data, &mut data); data } + /// TODO [Gav Wood] Please document me pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { if self.raw(key) == None { unsafe { diff --git a/util/src/misc.rs b/util/src/misc.rs index e5efd33bb..b28b8df42 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -3,9 +3,13 @@ use common::*; #[derive(Debug,Clone,PartialEq,Eq)] /// Diff type for specifying a change (or not). pub enum Diff where T: Eq { + /// TODO [Gav Wood] Please document me Same, + /// TODO [Gav Wood] Please document me Born(T), + /// TODO [Gav Wood] Please document me Changed(T, T), + /// TODO [Gav Wood] Please document me Died(T), } @@ -26,6 +30,8 @@ impl Diff where T: Eq { #[derive(PartialEq,Eq,Clone,Copy)] /// Boolean type for clean/dirty status. 
pub enum Filth { + /// TODO [Gav Wood] Please document me Clean, + /// TODO [Gav Wood] Please document me Dirty, } diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index a47e88927..2c9e71585 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -56,12 +56,18 @@ mod service; mod error; mod node; +/// TODO [arkpar] Please document me pub type PeerId = host::PeerId; +/// TODO [arkpar] Please document me pub type PacketId = host::PacketId; +/// TODO [arkpar] Please document me pub type NetworkContext<'s,'io, Message> = host::NetworkContext<'s, 'io, Message>; +/// TODO [arkpar] Please document me pub type NetworkService = service::NetworkService; +/// TODO [arkpar] Please document me pub type NetworkIoMessage = host::NetworkIoMessage; pub use network::host::NetworkIoMessage::User as UserMessage; +/// TODO [arkpar] Please document me pub type NetworkError = error::NetworkError; use io::*; diff --git a/util/src/nibbleslice.rs b/util/src/nibbleslice.rs index b9028dff3..c0d076440 100644 --- a/util/src/nibbleslice.rs +++ b/util/src/nibbleslice.rs @@ -34,6 +34,7 @@ pub struct NibbleSlice<'a> { offset_encode_suffix: usize, } +/// TODO [Gav Wood] Please document me pub struct NibbleSliceIterator<'a> { p: &'a NibbleSlice<'a>, i: usize, @@ -76,6 +77,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { (r, a.len() + b.len()) }*/ + /// TODO [Gav Wood] Please document me pub fn iter(&'a self) -> NibbleSliceIterator<'a> { NibbleSliceIterator { p: self, i: 0 } } @@ -130,6 +132,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { i } + /// TODO [Gav Wood] Please document me pub fn encoded(&self, is_leaf: bool) -> Bytes { let l = self.len(); let mut r = Bytes::with_capacity(l / 2 + 1); @@ -142,6 +145,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { r } + /// TODO [Gav Wood] Please document me pub fn encoded_leftmost(&self, n: usize, is_leaf: bool) -> Bytes { let l = min(self.len(), n); let mut r = Bytes::with_capacity(l / 2 + 1); diff --git a/util/src/rlp/mod.rs b/util/src/rlp/mod.rs index fc0a4d288..f6acdf180 100644 --- a/util/src/rlp/mod.rs +++ b/util/src/rlp/mod.rs @@ -30,10 +30,15 @@ //! * You want to get view onto rlp-slice. //! * You don't want to decode whole rlp at once. 
+/// TODO [Gav Wood] Please document me pub mod rlptraits; +/// TODO [Gav Wood] Please document me pub mod rlperrors; +/// TODO [debris] Please document me pub mod rlpin; +/// TODO [debris] Please document me pub mod untrusted_rlp; +/// TODO [debris] Please document me pub mod rlpstream; #[cfg(test)] @@ -46,9 +51,13 @@ pub use self::rlpin::{Rlp, RlpIterator}; pub use self::rlpstream::{RlpStream,RlpStandard}; use super::hash::H256; +/// TODO [arkpar] Please document me pub const NULL_RLP: [u8; 1] = [0x80; 1]; +/// TODO [Gav Wood] Please document me pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; +/// TODO [arkpar] Please document me pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); +/// TODO [debris] Please document me pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); /// Shortcut function to decode trusted rlp diff --git a/util/src/rlp/rlperrors.rs b/util/src/rlp/rlperrors.rs index 9ea470abf..97adbced1 100644 --- a/util/src/rlp/rlperrors.rs +++ b/util/src/rlp/rlperrors.rs @@ -3,14 +3,23 @@ use std::error::Error as StdError; use bytes::FromBytesError; #[derive(Debug, PartialEq, Eq)] +/// TODO [debris] Please document me pub enum DecoderError { + /// TODO [debris] Please document me FromBytesError(FromBytesError), + /// TODO [debris] Please document me RlpIsTooShort, + /// TODO [debris] Please document me RlpExpectedToBeList, + /// TODO [Gav Wood] Please document me RlpExpectedToBeData, + /// TODO [Gav Wood] Please document me RlpIncorrectListLen, + /// TODO [Gav Wood] Please document me RlpDataLenWithZeroPrefix, + /// TODO [Gav Wood] Please document me RlpListLenWithZeroPrefix, + /// TODO [debris] Please document me RlpInvalidIndirection, } diff --git a/util/src/rlp/rlpin.rs b/util/src/rlp/rlpin.rs index 159130750..5cecce44f 100644 --- a/util/src/rlp/rlpin.rs +++ b/util/src/rlp/rlpin.rs @@ -103,10 +103,12 @@ impl <'a, 'view> Rlp<'a> where 'a: 'view { res.unwrap_or_else(|_| panic!()) } + /// TODO [debris] Please document me pub fn as_val(&self) -> T where T: Decodable { Self::view_as_val(self) } + /// TODO [debris] Please document me pub fn val_at(&self, index: usize) -> T where T: Decodable { Self::view_as_val(&self.at(index)) } diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index a30978f24..724974e08 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -223,15 +223,19 @@ impl Encoder for BasicEncoder { } } +/// TODO [Gav Wood] Please document me pub trait RlpStandard { + /// TODO [Gav Wood] Please document me fn rlp_append(&self, s: &mut RlpStream); + /// TODO [Gav Wood] Please document me fn rlp_bytes(&self) -> Bytes { let mut s = RlpStream::new(); self.rlp_append(&mut s); s.out() } + /// TODO [Gav Wood] Please document me fn rlp_sha3(&self) -> H256 { self.rlp_bytes().sha3() } } diff --git a/util/src/rlp/rlptraits.rs b/util/src/rlp/rlptraits.rs index 407d62daf..6fbc2125c 100644 --- a/util/src/rlp/rlptraits.rs +++ b/util/src/rlp/rlptraits.rs @@ -1,23 +1,36 @@ use rlp::{DecoderError, UntrustedRlp}; +/// TODO [debris] Please document me pub trait Decoder: Sized { + /// TODO [debris] Please document me fn read_value(&self, f: F) -> Result where F: FnOnce(&[u8]) -> Result; + /// TODO 
[arkpar] Please document me fn as_list(&self) -> Result, DecoderError>; + /// TODO [Gav Wood] Please document me fn as_rlp<'a>(&'a self) -> &'a UntrustedRlp<'a>; + /// TODO [debris] Please document me fn as_raw(&self) -> &[u8]; } +/// TODO [debris] Please document me pub trait Decodable: Sized { + /// TODO [debris] Please document me fn decode(decoder: &D) -> Result where D: Decoder; } +/// TODO [debris] Please document me pub trait View<'a, 'view>: Sized { + /// TODO [debris] Please document me type Prototype; + /// TODO [debris] Please document me type PayloadInfo; + /// TODO [debris] Please document me type Data; + /// TODO [debris] Please document me type Item; + /// TODO [debris] Please document me type Iter; /// Creates a new instance of `Rlp` reader @@ -41,8 +54,10 @@ pub trait View<'a, 'view>: Sized { /// Get the prototype of the RLP. fn prototype(&self) -> Self::Prototype; + /// TODO [debris] Please document me fn payload_info(&self) -> Self::PayloadInfo; + /// TODO [debris] Please document me fn data(&'view self) -> Self::Data; /// Returns number of RLP items. @@ -179,21 +194,30 @@ pub trait View<'a, 'view>: Sized { /// ``` fn iter(&'view self) -> Self::Iter; + /// TODO [debris] Please document me fn as_val(&self) -> Result where T: Decodable; + /// TODO [debris] Please document me fn val_at(&self, index: usize) -> Result where T: Decodable; } +/// TODO [debris] Please document me pub trait Encoder { + /// TODO [debris] Please document me fn emit_value(&mut self, bytes: &[u8]) -> (); + /// TODO [Gav Wood] Please document me fn emit_list(&mut self, f: F) -> () where F: FnOnce(&mut Self) -> (); + /// TODO [debris] Please document me fn emit_raw(&mut self, bytes: &[u8]) -> (); } +/// TODO [debris] Please document me pub trait Encodable { + /// TODO [debris] Please document me fn encode(&self, encoder: &mut E) -> () where E: Encoder; } +/// TODO [debris] Please document me pub trait Stream: Sized { /// Initializes instance of empty `Stream`. @@ -284,6 +308,7 @@ pub trait Stream: Sized { /// } fn is_finished(&self) -> bool; + /// TODO [debris] Please document me fn as_raw(&self) -> &[u8]; /// Streams out encoded bytes. 
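The traits in the hunk above (Encoder, Encodable, Decoder, Decodable, View, Stream) are the core of the RLP layer, and the placeholders on them are intended to be replaced with real documentation later. For orientation, here is a hedged sketch of how the two user-facing traits fit together, written as a test that could sit under util/src/rlp/. It only uses calls that appear elsewhere in this crate (RlpStream::new_list, append, out, Rlp::new, val_at); the blanket Encodable/Decodable implementations for u64 and Vec<T> are assumed rather than shown.

	#[test]
	fn encodable_decodable_round_trip() {
		use rlp::*;

		// Encodable values are appended to an RlpStream...
		let mut stream = RlpStream::new_list(2);
		stream.append(&17u64);
		stream.append(&vec![1u64, 2, 3]);
		let encoded = stream.out();

		// ...and Decodable values are read back through a trusted Rlp view,
		// whose val_at panics (rather than returning a Result) on bad input.
		let view = Rlp::new(&encoded);
		let number: u64 = view.val_at(0);
		let items: Vec<u64> = view.val_at(1);
		assert_eq!(number, 17u64);
		assert_eq!(items, vec![1u64, 2, 3]);
	}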
diff --git a/util/src/rlp/untrusted_rlp.rs b/util/src/rlp/untrusted_rlp.rs index 2bf33ba68..3f11fa070 100644 --- a/util/src/rlp/untrusted_rlp.rs +++ b/util/src/rlp/untrusted_rlp.rs @@ -21,15 +21,21 @@ impl OffsetCache { } #[derive(Debug)] +/// TODO [debris] Please document me pub enum Prototype { + /// TODO [debris] Please document me Null, + /// TODO [debris] Please document me Data(usize), + /// TODO [debris] Please document me List(usize), } /// Stores basic information about item pub struct PayloadInfo { + /// TODO [debris] Please document me pub header_len: usize, + /// TODO [debris] Please document me pub value_len: usize, } diff --git a/util/src/sha3.rs b/util/src/sha3.rs index a33ac61f7..115a408de 100644 --- a/util/src/sha3.rs +++ b/util/src/sha3.rs @@ -6,6 +6,7 @@ use bytes::{BytesConvertable, Populatable}; use hash::{H256, FixedHash}; use self::sha3_ext::*; +/// TODO [Gav Wood] Please document me pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); diff --git a/util/src/squeeze.rs b/util/src/squeeze.rs index e81a13793..10f446395 100644 --- a/util/src/squeeze.rs +++ b/util/src/squeeze.rs @@ -36,6 +36,7 @@ use heapsize::HeapSizeOf; /// Should be used to squeeze collections to certain size in bytes pub trait Squeeze { + /// TODO [debris] Please document me fn squeeze(&mut self, size: usize); } diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index 1b5919bd4..ce796bd15 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -1,10 +1,17 @@ +/// TODO [Gav Wood] Please document me pub mod trietraits; pub mod standardmap; +/// TODO [Gav Wood] Please document me pub mod journal; +/// TODO [Gav Wood] Please document me pub mod node; +/// TODO [Gav Wood] Please document me pub mod triedb; +/// TODO [Gav Wood] Please document me pub mod triedbmut; +/// TODO [Gav Wood] Please document me pub mod sectriedb; +/// TODO [Gav Wood] Please document me pub mod sectriedbmut; pub use self::trietraits::*; diff --git a/util/src/trie/node.rs b/util/src/trie/node.rs index b10b0e05e..dad5830b2 100644 --- a/util/src/trie/node.rs +++ b/util/src/trie/node.rs @@ -7,9 +7,13 @@ use super::journal::*; /// Type of node in the trie and essential information thereof. #[derive(Clone, Eq, PartialEq, Debug)] pub enum Node<'a> { + /// TODO [Gav Wood] Please document me Empty, + /// TODO [Gav Wood] Please document me Leaf(NibbleSlice<'a>, &'a[u8]), + /// TODO [Gav Wood] Please document me Extension(NibbleSlice<'a>, &'a[u8]), + /// TODO [Gav Wood] Please document me Branch([&'a[u8]; 16], Option<&'a [u8]>) } diff --git a/util/src/trie/standardmap.rs b/util/src/trie/standardmap.rs index b1ca03d22..0e65849cc 100644 --- a/util/src/trie/standardmap.rs +++ b/util/src/trie/standardmap.rs @@ -7,9 +7,13 @@ use hash::*; /// Alphabet to use when creating words for insertion into tries. 
pub enum Alphabet { + /// TODO [Gav Wood] Please document me All, + /// TODO [Gav Wood] Please document me Low, + /// TODO [Gav Wood] Please document me Mid, + /// TODO [Gav Wood] Please document me Custom(Bytes), } diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 9e4cf36e2..b0fcfcd16 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -34,6 +34,7 @@ use super::node::*; pub struct TrieDB<'db> { db: &'db HashDB, root: &'db H256, + /// TODO [Gav Wood] Please document me pub hash_count: usize, } diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 0f3dde4fb..3f62b2fdb 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -40,6 +40,7 @@ use super::trietraits::*; pub struct TrieDBMut<'db> { db: &'db mut HashDB, root: &'db mut H256, + /// TODO [Gav Wood] Please document me pub hash_count: usize, } diff --git a/util/src/uint.rs b/util/src/uint.rs index c1cb38fd2..5721c5614 100644 --- a/util/src/uint.rs +++ b/util/src/uint.rs @@ -58,15 +58,20 @@ macro_rules! panic_on_overflow { } } +/// TODO [Gav Wood] Please document me pub trait Uint: Sized + Default + FromStr + From + FromJson + fmt::Debug + fmt::Display + PartialOrd + Ord + PartialEq + Eq + Hash { /// Size of this type. const SIZE: usize; + /// TODO [Gav Wood] Please document me fn zero() -> Self; + /// TODO [Gav Wood] Please document me fn one() -> Self; + /// TODO [Gav Wood] Please document me type FromDecStrErr; + /// TODO [Gav Wood] Please document me fn from_dec_str(value: &str) -> Result; /// Conversion to u32 @@ -98,18 +103,25 @@ pub trait Uint: Sized + Default + FromStr + From + FromJson + fmt::Debug + fn overflowing_pow(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_add(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_sub(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_mul(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_div(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_rem(self, other: Self) -> (Self, bool); + /// TODO [debris] Please document me fn overflowing_neg(self) -> (Self, bool); + /// TODO [Gav Wood] Please document me fn overflowing_shl(self, shift: u32) -> (Self, bool); } @@ -914,8 +926,11 @@ impl From for u32 { } } +/// TODO [Gav Wood] Please document me pub const ZERO_U256: U256 = U256([0x00u64; 4]); +/// TODO [Gav Wood] Please document me pub const ONE_U256: U256 = U256([0x01u64, 0x00u64, 0x00u64, 0x00u64]); +/// TODO [Gav Wood] Please document me pub const BAD_U256: U256 = U256([0xffffffffffffffffu64; 4]); #[cfg(test)] diff --git a/util/src/vector.rs b/util/src/vector.rs index 552e285cf..94b5ee70c 100644 --- a/util/src/vector.rs +++ b/util/src/vector.rs @@ -2,7 +2,9 @@ use std::ptr; +/// TODO [debris] Please document me pub trait InsertSlice { + /// TODO [debris] Please document me fn insert_slice(&mut self, index: usize, elements: &[T]); } @@ -47,6 +49,7 @@ impl InsertSlice for Vec { /// } /// ``` pub trait SharedPrefix { + /// TODO [debris] Please document me fn shared_prefix_len(&self, elem: &[T]) -> usize; } From 9f73af3e534979ee77dc06e6cdd9d5d8b270e0e9 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 17:02:21 +0100 Subject: [PATCH 25/28] Annotate missing docs script --- annotatemissingdocs.js | 65 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100755 annotatemissingdocs.js 
diff --git a/annotatemissingdocs.js b/annotatemissingdocs.js new file mode 100755 index 000000000..1ea621b51 --- /dev/null +++ b/annotatemissingdocs.js @@ -0,0 +1,65 @@ +#!/usr/bin/env node + +'use strict'; + +const fs = require('fs'); +const exec = require('child_process').exec; + +// First run +// $ cargo build |& grep "warning: missing documentation" > missingdocs +const lines = fs.readFileSync('./missingdocs', 'utf8').split('\n'); +const pattern = /(.+):([0-9]+):([0-9]+)/; + +const errors = lines.map((line) => { + const parts = line.match(pattern); + if (!parts || parts.length < 4) { + console.error('Strange line: ' + line); + return; + } + return { + path: parts[1], + line: parts[2], + col: parts[3] + }; +}).filter((line) => line); + +const indexed = errors.reduce((index, error) => { + if (!index[error.path]) { + index[error.path] = []; + } + index[error.path].push(error); + + return index; +}, {}); + +for (let path in indexed) { + let file = fs.readFileSync(path, 'utf8').split('\n'); + let error = indexed[path].sort((a, b) => b.line - a.line); + let next = () => { + let err = error.shift(); + if (!err) { + fs.writeFileSync(path, file.join('\n'), 'utf8'); + return; + } + // Process next error + let tabs = Array(parseInt(err.col, 10)).join('\t'); + get_user(path, err.line, (user) => { + let line = err.line - 1; + let comment = `${tabs}/// TODO [${user}] Please document me`; + if (file[line] !== comment) { + file.splice(line, 0, comment); + } + next(); + }); + }; + next(); +} + +function get_user (path, line, cb) { + exec(`git blame ${path}`, (err, stdout, stderr) => { + if (err) throw err; + const l = stdout.split('\n')[line]; + const user = l.match(/\(([a-zA-Z ]+?)\s+2/); + cb(user[1]); + }); +} From abbd396d0f9223b48f4a4c7ac33e824c216c6b90 Mon Sep 17 00:00:00 2001 From: Tomusdrw Date: Tue, 19 Jan 2016 17:02:30 +0100 Subject: [PATCH 26/28] Revert "Annotate missing docs script" This reverts commit 9f73af3e534979ee77dc06e6cdd9d5d8b270e0e9. 
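Before it is reverted in the next hunk, the helper above works in two steps: group the compiler's missing-documentation warnings by file, then splice a TODO doc comment above each reported item, attributing it to the author found via git blame. For readers more at home in Rust, a rough, purely illustrative equivalent of the splice step (standard library only; the git blame lookup is omitted and a fixed author name stands in for it):

	use std::collections::HashMap;
	use std::fs;

	fn main() -> std::io::Result<()> {
		// Warnings are expected in ./missingdocs, one per line, shaped like
		// "util/src/hash.rs:16:5: ... warning: missing documentation ...".
		let warnings = fs::read_to_string("missingdocs")?;

		// Group the reported (line, column) positions by file path.
		let mut by_file: HashMap<&str, Vec<(usize, usize)>> = HashMap::new();
		for w in warnings.lines() {
			let mut parts = w.splitn(4, ':');
			if let (Some(path), Some(line), Some(col)) = (parts.next(), parts.next(), parts.next()) {
				if let (Ok(line), Ok(col)) = (line.parse(), col.parse()) {
					by_file.entry(path).or_insert_with(Vec::new).push((line, col));
				}
			}
		}

		for (path, mut hits) in by_file {
			let mut lines: Vec<String> =
				fs::read_to_string(path)?.lines().map(String::from).collect();
			// Insert from the bottom of the file up so earlier line numbers stay valid.
			hits.sort_by(|a, b| b.0.cmp(&a.0));
			for (line, col) in hits {
				// Indent roughly to the reported column with tabs, as the JS helper does.
				let indent = "\t".repeat(col.saturating_sub(1));
				if line >= 1 && line <= lines.len() {
					lines.insert(line - 1, format!("{}/// TODO [someone] Please document me", indent));
				}
			}
			fs::write(path, lines.join("\n") + "\n")?;
		}
		Ok(())
	}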
--- annotatemissingdocs.js | 65 ------------------------------------------ 1 file changed, 65 deletions(-) delete mode 100755 annotatemissingdocs.js diff --git a/annotatemissingdocs.js b/annotatemissingdocs.js deleted file mode 100755 index 1ea621b51..000000000 --- a/annotatemissingdocs.js +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env node - -'use strict'; - -const fs = require('fs'); -const exec = require('child_process').exec; - -// First run -// $ cargo build |& grep "warning: missing documentation" > missingdocs -const lines = fs.readFileSync('./missingdocs', 'utf8').split('\n'); -const pattern = /(.+):([0-9]+):([0-9]+)/; - -const errors = lines.map((line) => { - const parts = line.match(pattern); - if (!parts || parts.length < 4) { - console.error('Strange line: ' + line); - return; - } - return { - path: parts[1], - line: parts[2], - col: parts[3] - }; -}).filter((line) => line); - -const indexed = errors.reduce((index, error) => { - if (!index[error.path]) { - index[error.path] = []; - } - index[error.path].push(error); - - return index; -}, {}); - -for (let path in indexed) { - let file = fs.readFileSync(path, 'utf8').split('\n'); - let error = indexed[path].sort((a, b) => b.line - a.line); - let next = () => { - let err = error.shift(); - if (!err) { - fs.writeFileSync(path, file.join('\n'), 'utf8'); - return; - } - // Process next error - let tabs = Array(parseInt(err.col, 10)).join('\t'); - get_user(path, err.line, (user) => { - let line = err.line - 1; - let comment = `${tabs}/// TODO [${user}] Please document me`; - if (file[line] !== comment) { - file.splice(line, 0, comment); - } - next(); - }); - }; - next(); -} - -function get_user (path, line, cb) { - exec(`git blame ${path}`, (err, stdout, stderr) => { - if (err) throw err; - const l = stdout.split('\n')[line]; - const user = l.match(/\(([a-zA-Z ]+?)\s+2/); - cb(user[1]); - }); -} From c0a923a2716fe97c9e41f34b77987dc0bd719d87 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 20 Jan 2016 15:55:29 +0100 Subject: [PATCH 27/28] basic .travis.yml --- .travis.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..a5b361fc4 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: rust + +rust: + - nightly + +os: + - osx + +before_script: + - brew update + - brew install rocksdb From a3ced5140ce07f90e3f4ef0d191d66669e891a03 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 20 Jan 2016 18:43:29 +0300 Subject: [PATCH 28/28] cache directories --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index a5b361fc4..1efef4f21 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,3 +9,8 @@ os: before_script: - brew update - brew install rocksdb + +cache: + directories: + - $TRAVIS_BUILD_DIR/target + - $HOME/.cargo \ No newline at end of file