Merge branch 'master' of github.com:ethcore/parity into ethash
commit 0f97edad7c
.travis.yml | 16 | Normal file
							| @ -0,0 +1,16 @@ | ||||
| language: rust | ||||
| 
 | ||||
| rust: | ||||
|     - nightly | ||||
| 
 | ||||
| os: | ||||
|     - osx | ||||
| 
 | ||||
| before_script: | ||||
|     - brew update | ||||
|     - brew install rocksdb | ||||
| 
 | ||||
| cache: | ||||
|   directories: | ||||
|   - $TRAVIS_BUILD_DIR/target | ||||
|   - $HOME/.cargo | ||||
| @ -24,3 +24,7 @@ num_cpus = "0.2" | ||||
| [features] | ||||
| jit = ["evmjit"] | ||||
| evm_debug = [] | ||||
| 
 | ||||
| [[bin]] | ||||
| name = "client" | ||||
| path = "src/bin/client/main.rs" | ||||
|  | ||||
							
								
								
									
cov.sh | 21 | Executable file
							| @ -0,0 +1,21 @@ | ||||
| #!/bin/sh | ||||
| # Installing KCOV under ubuntu | ||||
| # https://users.rust-lang.org/t/tutorial-how-to-collect-test-coverages-for-rust-project/650# | ||||
| ### Install deps | ||||
| # $ sudo apt-get install libcurl4-openssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev | ||||
| # | ||||
| ### Compile kcov | ||||
| # $ wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz && tar xf master.tar.gz | ||||
| # $ cd kcov-master && mkdir build && cd build | ||||
| # $ cmake .. && make && sudo make install | ||||
| 
 | ||||
| ### Running coverage | ||||
| if ! type kcov > /dev/null; then | ||||
|    	echo "Install kcov first (details inside this file). Aborting." | ||||
| 	exit 1 | ||||
| fi | ||||
| 
 | ||||
| cargo test --no-run || exit $? | ||||
| mkdir -p target/coverage | ||||
| kcov --exclude-pattern ~/.multirust --include-pattern src --verify target/coverage target/debug/ethcore* | ||||
| xdg-open target/coverage/index.html | ||||
| @ -236,7 +236,7 @@ mod tests { | ||||
| 
 | ||||
| 	#[test] | ||||
| 	fn storage_at() { | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = MemoryDB::new(); | ||||
| 		let rlp = { | ||||
| 			let mut a = Account::new_contract(U256::from(69u8)); | ||||
| 			a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); | ||||
| @ -254,7 +254,7 @@ mod tests { | ||||
| 
 | ||||
| 	#[test] | ||||
| 	fn note_code() { | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = MemoryDB::new(); | ||||
| 
 | ||||
| 		let rlp = { | ||||
| 			let mut a = Account::new_contract(U256::from(69u8)); | ||||
| @ -273,7 +273,7 @@ mod tests { | ||||
| 	#[test] | ||||
| 	fn commit_storage() { | ||||
| 		let mut a = Account::new_contract(U256::from(69u8)); | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = MemoryDB::new(); | ||||
| 		a.set_storage(x!(0), x!(0x1234)); | ||||
| 		assert_eq!(a.storage_root(), None); | ||||
| 		a.commit_storage(&mut db); | ||||
| @ -283,7 +283,7 @@ mod tests { | ||||
| 	#[test] | ||||
| 	fn commit_remove_commit_storage() { | ||||
| 		let mut a = Account::new_contract(U256::from(69u8)); | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = MemoryDB::new(); | ||||
| 		a.set_storage(x!(0), x!(0x1234)); | ||||
| 		a.commit_storage(&mut db); | ||||
| 		a.set_storage(x!(1), x!(0x1234)); | ||||
| @ -296,7 +296,7 @@ mod tests { | ||||
| 	#[test] | ||||
| 	fn commit_code() { | ||||
| 		let mut a = Account::new_contract(U256::from(69u8)); | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = MemoryDB::new(); | ||||
| 		a.init_code(vec![0x55, 0x44, 0xffu8]); | ||||
| 		assert_eq!(a.code_hash(), SHA3_EMPTY); | ||||
| 		a.commit_code(&mut db); | ||||
|  | ||||
| @ -5,8 +5,11 @@ use pod_account::*; | ||||
| /// Change in existance type. 
 | ||||
| // TODO: include other types of change.
 | ||||
| pub enum Existance { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Born, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Alive, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Died, | ||||
| } | ||||
| 
 | ||||
| @ -22,14 +25,20 @@ impl fmt::Display for Existance { | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug,Clone,PartialEq,Eq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct AccountDiff { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub balance: Diff<U256>,				// Allowed to be Same
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub nonce: Diff<U256>,					// Allowed to be Same
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub code: Diff<Bytes>,					// Allowed to be Same
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub storage: BTreeMap<H256, Diff<H256>>,// Not allowed to be Same
 | ||||
| } | ||||
| 
 | ||||
| impl AccountDiff { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn existance(&self) -> Existance { | ||||
| 		match self.balance { | ||||
| 			Diff::Born(_) => Existance::Born, | ||||
| @ -38,6 +47,7 @@ impl AccountDiff { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<AccountDiff> { | ||||
| 		match (pre, post) { | ||||
| 			(None, Some(x)) => Some(AccountDiff { | ||||
|  | ||||
| @ -30,6 +30,7 @@ pub struct ActionParams { | ||||
| } | ||||
| 
 | ||||
| impl ActionParams { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn new() -> ActionParams { | ||||
| 		ActionParams { | ||||
| 			code_address: Address::new(), | ||||
|  | ||||
| @ -6,7 +6,10 @@ pub type LogBloom = H2048; | ||||
| /// Constant 2048-bit datum for 0. Often used as a default.
 | ||||
| pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub enum Seal { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	With, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Without, | ||||
| } | ||||
|  | ||||
| @ -1,59 +0,0 @@ | ||||
| extern crate ethcore_util as util; | ||||
| extern crate ethcore; | ||||
| extern crate rustc_serialize; | ||||
| extern crate log; | ||||
| extern crate env_logger; | ||||
| 
 | ||||
| use std::io::stdin; | ||||
| use std::env; | ||||
| use log::{LogLevelFilter}; | ||||
| use env_logger::LogBuilder; | ||||
| use util::*; | ||||
| use ethcore::client::*; | ||||
| use ethcore::service::ClientService; | ||||
| use ethcore::ethereum; | ||||
| use ethcore::sync::*; | ||||
| 
 | ||||
| fn setup_log() { | ||||
| 	let mut builder = LogBuilder::new(); | ||||
| 	builder.filter(None, LogLevelFilter::Info); | ||||
| 
 | ||||
| 	if env::var("RUST_LOG").is_ok() { | ||||
| 		builder.parse(&env::var("RUST_LOG").unwrap()); | ||||
| 	} | ||||
| 
 | ||||
| 	builder.init().unwrap(); | ||||
| } | ||||
| 
 | ||||
| fn main() { | ||||
| 	setup_log(); | ||||
| 	let spec = ethereum::new_frontier(); | ||||
| 	let mut service = ClientService::start(spec).unwrap(); | ||||
| 	let io_handler  = Arc::new(ClientIoHandler { client: service.client() }); | ||||
| 	service.io().register_handler(io_handler).expect("Error registering IO handler"); | ||||
| 	loop { | ||||
| 		let mut cmd = String::new(); | ||||
| 		stdin().read_line(&mut cmd).unwrap(); | ||||
| 		if cmd == "quit\n" || cmd == "exit\n" || cmd == "q\n" { | ||||
| 			break; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| struct ClientIoHandler { | ||||
| 	client: Arc<RwLock<Client>>, | ||||
| } | ||||
| 
 | ||||
| impl IoHandler<NetSyncMessage> for ClientIoHandler { | ||||
| 	fn initialize(&self, io: &IoContext<NetSyncMessage>) { 
 | ||||
| 		io.register_timer(0, 5000).expect("Error registering timer"); | ||||
| 	} | ||||
| 
 | ||||
| 	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) { | ||||
| 		if timer == 0 { | ||||
| 			println!("Chain info: {:?}", self.client.read().unwrap().deref().chain_info()); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
							
								
								
									
src/bin/client/main.rs | 109 | Normal file
							| @ -0,0 +1,109 @@ | ||||
| extern crate ethcore_util as util; | ||||
| extern crate ethcore; | ||||
| extern crate rustc_serialize; | ||||
| extern crate log; | ||||
| extern crate env_logger; | ||||
| 
 | ||||
| use std::io::stdin; | ||||
| use std::env; | ||||
| use log::{LogLevelFilter}; | ||||
| use env_logger::LogBuilder; | ||||
| use util::*; | ||||
| use ethcore::client::*; | ||||
| use ethcore::service::ClientService; | ||||
| use ethcore::ethereum; | ||||
| use ethcore::blockchain::CacheSize; | ||||
| use ethcore::sync::*; | ||||
| 
 | ||||
| fn setup_log() { | ||||
| 	let mut builder = LogBuilder::new(); | ||||
| 	builder.filter(None, LogLevelFilter::Info); | ||||
| 
 | ||||
| 	if env::var("RUST_LOG").is_ok() { | ||||
| 		builder.parse(&env::var("RUST_LOG").unwrap()); | ||||
| 	} | ||||
| 
 | ||||
| 	builder.init().unwrap(); | ||||
| } | ||||
| 
 | ||||
| fn main() { | ||||
| 	setup_log(); | ||||
| 	let spec = ethereum::new_frontier(); | ||||
| 	let mut service = ClientService::start(spec).unwrap(); | ||||
| 	let io_handler  = Arc::new(ClientIoHandler { client: service.client(), info: Default::default() }); | ||||
| 	service.io().register_handler(io_handler).expect("Error registering IO handler"); | ||||
| 	loop { | ||||
| 		let mut cmd = String::new(); | ||||
| 		stdin().read_line(&mut cmd).unwrap(); | ||||
| 		if cmd == "quit\n" || cmd == "exit\n" || cmd == "q\n" { | ||||
| 			break; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| struct Informant { | ||||
| 	chain_info: RwLock<Option<BlockChainInfo>>, | ||||
| 	cache_info: RwLock<Option<CacheSize>>, | ||||
| 	report: RwLock<Option<ClientReport>>, | ||||
| } | ||||
| 
 | ||||
| impl Default for Informant { | ||||
| 	fn default() -> Self { | ||||
| 		Informant { | ||||
| 			chain_info: RwLock::new(None), | ||||
| 			cache_info: RwLock::new(None), | ||||
| 			report: RwLock::new(None), | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| impl Informant { | ||||
| 	pub fn tick(&self, client: &Client) { | ||||
| 		// 5 seconds betwen calls. TODO: calculate this properly.
 | ||||
| 		let dur = 5usize; | ||||
| 
 | ||||
| 		let chain_info = client.chain_info(); | ||||
| 		let cache_info = client.cache_info(); | ||||
| 		let report = client.report(); | ||||
| 
 | ||||
| 		if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { | ||||
| 			println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s  //···{}···//  {} ({}) bl  {} ({}) ex ]", | ||||
| 				chain_info.best_block_number, | ||||
| 				chain_info.best_block_hash, | ||||
| 				(report.blocks_imported - last_report.blocks_imported) / dur, | ||||
| 				(report.transactions_applied - last_report.transactions_applied) / dur, | ||||
| 				(report.gas_processed - last_report.gas_processed) / From::from(dur), | ||||
| 				0, // TODO: peers
 | ||||
| 				cache_info.blocks, | ||||
| 				cache_info.blocks as isize - last_cache_info.blocks as isize, | ||||
| 				cache_info.block_details, | ||||
| 				cache_info.block_details as isize - last_cache_info.block_details as isize | ||||
| 			); | ||||
| 		} | ||||
| 
 | ||||
| 		*self.chain_info.write().unwrap().deref_mut() = Some(chain_info); | ||||
| 		*self.cache_info.write().unwrap().deref_mut() = Some(cache_info); | ||||
| 		*self.report.write().unwrap().deref_mut() = Some(report); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| const INFO_TIMER: TimerToken = 0; | ||||
| 
 | ||||
| struct ClientIoHandler { | ||||
| 	client: Arc<RwLock<Client>>, | ||||
| 	info: Informant, | ||||
| } | ||||
| 
 | ||||
| impl IoHandler<NetSyncMessage> for ClientIoHandler { | ||||
| 	fn initialize(&self, io: &IoContext<NetSyncMessage>) { 
 | ||||
| 		io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); | ||||
| 	} | ||||
| 
 | ||||
| 	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) { | ||||
| 		if INFO_TIMER == timer { | ||||
| 			let client = self.client.read().unwrap(); | ||||
| 			self.info.tick(client.deref()); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
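Aside on the new `Informant` above: it keeps the previous `ClientReport` and `CacheSize` snapshots behind `RwLock<Option<...>>` and, on each 5-second timer tick, prints the per-second deltas against them before storing the fresh snapshots. A minimal, self-contained sketch of that snapshot-and-diff pattern (the `Report` struct and its fields here are illustrative stand-ins, not the real ethcore types):

use std::sync::RwLock;

struct Report {
    blocks_imported: usize,
    transactions_applied: usize,
}

struct Informant {
    last: RwLock<Option<Report>>,
}

impl Informant {
    fn tick(&self, current: Report, elapsed_secs: usize) {
        // Only print rates once there is a previous snapshot to diff against.
        if let Some(ref last) = *self.last.read().unwrap() {
            println!("{} blk/s | {} tx/s",
                (current.blocks_imported - last.blocks_imported) / elapsed_secs,
                (current.transactions_applied - last.transactions_applied) / elapsed_secs);
        }
        // Remember the current snapshot for the next timer tick.
        *self.last.write().unwrap() = Some(current);
    }
}

fn main() {
    let informant = Informant { last: RwLock::new(None) };
    informant.tick(Report { blocks_imported: 0, transactions_applied: 0 }, 5);
    informant.tick(Report { blocks_imported: 50, transactions_applied: 400 }, 5); // prints "10 blk/s | 80 tx/s"
}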
							
								
								
									
src/block.rs | 24
							| @ -24,9 +24,13 @@ pub struct Block { | ||||
| 
 | ||||
| /// A set of references to `Block` fields that are publicly accessible. 
 | ||||
| pub struct BlockRefMut<'a> { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub header: &'a Header, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub state: &'a mut State, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub archive: &'a Vec<Entry>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub uncles: &'a Vec<Header>, | ||||
| } | ||||
| 
 | ||||
| @ -104,7 +108,7 @@ pub struct SealedBlock { | ||||
| 
 | ||||
| impl<'x, 'y> OpenBlock<'x, 'y> { | ||||
| 	/// Create a new OpenBlock ready for transaction pushing.
 | ||||
| 	pub fn new<'a, 'b>(engine: &'a Engine, db: OverlayDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { | ||||
| 	pub fn new<'a, 'b>(engine: &'a Engine, db: JournalDB, parent: &Header, last_hashes: &'b LastHashes, author: Address, extra_data: Bytes) -> OpenBlock<'a, 'b> { | ||||
| 		let mut r = OpenBlock { | ||||
| 			block: Block::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), | ||||
| 			engine: engine, | ||||
| @ -242,7 +246,7 @@ impl<'x, 'y> ClosedBlock<'x, 'y> { | ||||
| 	pub fn reopen(self) -> OpenBlock<'x, 'y> { self.open_block } | ||||
| 
 | ||||
| 	/// Drop this object and return the underlieing database.
 | ||||
| 	pub fn drain(self) -> OverlayDB { self.open_block.block.state.drop().1 } | ||||
| 	pub fn drain(self) -> JournalDB { self.open_block.block.state.drop().1 } | ||||
| } | ||||
| 
 | ||||
| impl SealedBlock { | ||||
| @ -257,7 +261,7 @@ impl SealedBlock { | ||||
| 	} | ||||
| 
 | ||||
| 	/// Drop this object and return the underlieing database.
 | ||||
| 	pub fn drain(self) -> OverlayDB { self.block.state.drop().1 } | ||||
| 	pub fn drain(self) -> JournalDB { self.block.state.drop().1 } | ||||
| } | ||||
| 
 | ||||
| impl IsBlock for SealedBlock { | ||||
| @ -265,7 +269,7 @@ impl IsBlock for SealedBlock { | ||||
| } | ||||
| 
 | ||||
| /// Enact the block given by block header, transactions and uncles
 | ||||
| pub fn enact<'x, 'y>(header: &Header, transactions: &[Transaction], uncles: &[Header], engine: &'x Engine, db: OverlayDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| pub fn enact<'x, 'y>(header: &Header, transactions: &[Transaction], uncles: &[Header], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| 	{ | ||||
| 		let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); | ||||
| 		trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); | ||||
| @ -281,20 +285,20 @@ pub fn enact<'x, 'y>(header: &Header, transactions: &[Transaction], uncles: &[He | ||||
| } | ||||
| 
 | ||||
| /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
 | ||||
| pub fn enact_bytes<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: OverlayDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| pub fn enact_bytes<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| 	let block = BlockView::new(block_bytes); | ||||
| 	let header = block.header(); | ||||
| 	enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) | ||||
| } | ||||
| 
 | ||||
| /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
 | ||||
| pub fn enact_verified<'x, 'y>(block: &PreVerifiedBlock, engine: &'x Engine, db: OverlayDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| pub fn enact_verified<'x, 'y>(block: &PreVerifiedBlock, engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> { | ||||
| 	let view = BlockView::new(&block.bytes); | ||||
| 	enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) | ||||
| } | ||||
| 
 | ||||
| /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
 | ||||
| pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: OverlayDB, parent: &Header, last_hashes: &LastHashes) -> Result<SealedBlock, Error> { | ||||
| pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: &LastHashes) -> Result<SealedBlock, Error> { | ||||
| 	let header = BlockView::new(block_bytes).header_view(); | ||||
| 	Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(header.seal()))) | ||||
| } | ||||
| @ -304,7 +308,7 @@ fn open_block() { | ||||
| 	use spec::*; | ||||
| 	let engine = Spec::new_test().to_engine().unwrap(); | ||||
| 	let genesis_header = engine.spec().genesis_header(); | ||||
| 	let mut db = OverlayDB::new_temp(); | ||||
| 	let mut db = JournalDB::new_temp(); | ||||
| 	engine.spec().ensure_db_good(&mut db); | ||||
| 	let last_hashes = vec![genesis_header.hash()]; | ||||
| 	let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); | ||||
| @ -318,13 +322,13 @@ fn enact_block() { | ||||
| 	let engine = Spec::new_test().to_engine().unwrap(); | ||||
| 	let genesis_header = engine.spec().genesis_header(); | ||||
| 
 | ||||
| 	let mut db = OverlayDB::new_temp(); | ||||
| 	let mut db = JournalDB::new_temp(); | ||||
| 	engine.spec().ensure_db_good(&mut db); | ||||
| 	let b = OpenBlock::new(engine.deref(), db, &genesis_header, &vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap(); | ||||
| 	let orig_bytes = b.rlp_bytes(); | ||||
| 	let orig_db = b.drain(); | ||||
| 
 | ||||
| 	let mut db = OverlayDB::new_temp(); | ||||
| 	let mut db = JournalDB::new_temp(); | ||||
| 	engine.spec().ensure_db_good(&mut db); | ||||
| 	let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, &vec![genesis_header.hash()]).unwrap(); | ||||
| 
 | ||||
|  | ||||
| @ -15,21 +15,34 @@ use views::*; | ||||
| ///
 | ||||
| /// - `index` - an index where best common ancestor would be.
 | ||||
| pub struct TreeRoute { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub blocks: Vec<H256>, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub ancestor: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub index: usize | ||||
| } | ||||
| 
 | ||||
| /// Represents blockchain's in-memory cache size in bytes.
 | ||||
| #[derive(Debug)] | ||||
| pub struct CacheSize { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub blocks: usize, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub block_details: usize, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub transaction_addresses: usize, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub block_logs: usize, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub blocks_blooms: usize | ||||
| } | ||||
| 
 | ||||
| impl CacheSize { | ||||
| 	/// Total amount used by the cache.
 | ||||
| 	fn total(&self) -> usize { self.blocks + self.block_details + self.transaction_addresses + self.block_logs + self.blocks_blooms } | ||||
| } | ||||
| 
 | ||||
| /// Information about best block gathered together
 | ||||
| struct BestBlock { | ||||
| 	pub hash: H256, | ||||
| @ -96,6 +109,17 @@ pub trait BlockProvider { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, Hash, Eq, PartialEq, Clone)] | ||||
| enum CacheID { | ||||
| 	Block(H256), | ||||
| 	Extras(ExtrasIndex, H256), | ||||
| } | ||||
| 
 | ||||
| struct CacheManager { | ||||
| 	cache_usage: VecDeque<HashSet<CacheID>>, | ||||
| 	in_use: HashSet<CacheID>, | ||||
| } | ||||
| 
 | ||||
| /// Structure providing fast access to blockchain data.
 | ||||
| ///
 | ||||
| /// **Does not do input data verification.**
 | ||||
| @ -113,7 +137,9 @@ pub struct BlockChain { | ||||
| 	blocks_blooms: RwLock<HashMap<H256, BlocksBlooms>>, | ||||
| 
 | ||||
| 	extras_db: DB, | ||||
| 	blocks_db: DB | ||||
| 	blocks_db: DB, | ||||
| 
 | ||||
| 	cache_man: RwLock<CacheManager>, | ||||
| } | ||||
| 
 | ||||
| impl BlockProvider for BlockChain { | ||||
| @ -136,6 +162,8 @@ impl BlockProvider for BlockChain { | ||||
| 		let opt = self.blocks_db.get(hash) | ||||
| 			.expect("Low level database error. Some issue with disk?"); | ||||
| 
 | ||||
| 		self.note_used(CacheID::Block(hash.clone())); | ||||
| 
 | ||||
| 		match opt { | ||||
| 			Some(b) => { | ||||
| 				let bytes: Bytes = b.to_vec(); | ||||
| @ -158,6 +186,10 @@ impl BlockProvider for BlockChain { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| const COLLECTION_QUEUE_SIZE: usize = 2; | ||||
| const MIN_CACHE_SIZE: usize = 1; | ||||
| const MAX_CACHE_SIZE: usize = 1024 * 1024 * 1; | ||||
| 
 | ||||
| impl BlockChain { | ||||
| 	/// Create new instance of blockchain from given Genesis
 | ||||
| 	///
 | ||||
| @ -197,6 +229,9 @@ impl BlockChain { | ||||
| 		blocks_path.push("blocks"); | ||||
| 		let blocks_db = DB::open_default(blocks_path.to_str().unwrap()).unwrap(); | ||||
| 
 | ||||
| 		let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}; | ||||
| 		(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); | ||||
| 
 | ||||
| 		let bc = BlockChain { | ||||
| 			best_block: RwLock::new(BestBlock::new()), | ||||
| 			blocks: RwLock::new(HashMap::new()), | ||||
| @ -206,7 +241,8 @@ impl BlockChain { | ||||
| 			block_logs: RwLock::new(HashMap::new()), | ||||
| 			blocks_blooms: RwLock::new(HashMap::new()), | ||||
| 			extras_db: extras_db, | ||||
| 			blocks_db: blocks_db | ||||
| 			blocks_db: blocks_db, | ||||
| 			cache_man: RwLock::new(cache_man), | ||||
| 		}; | ||||
| 
 | ||||
| 		// load best block
 | ||||
| @ -251,7 +287,7 @@ impl BlockChain { | ||||
| 	/// Ensure that the best block does indeed have a state_root in the state DB.
 | ||||
| 	/// If it doesn't, then rewind down until we find one that does and delete data to ensure that
 | ||||
| 	/// later blocks will be reimported. 
 | ||||
| 	pub fn ensure_good(&mut self, _state: &OverlayDB) { | ||||
| 	pub fn ensure_good(&mut self, _state: &JournalDB) { | ||||
| 		unimplemented!(); | ||||
| 	} | ||||
| 
 | ||||
| @ -497,6 +533,10 @@ impl BlockChain { | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if let Some(h) = hash.as_h256() { | ||||
| 			self.note_used(CacheID::Extras(T::extras_index(), h.clone())); | ||||
| 		} | ||||
| 
 | ||||
| 		self.extras_db.get_extras(hash).map(| t: T | { | ||||
| 			let mut write = cache.write().unwrap(); | ||||
| 			write.insert(hash.clone(), t.clone()); | ||||
| @ -537,6 +577,56 @@ impl BlockChain { | ||||
| 		self.block_logs.write().unwrap().squeeze(size.block_logs); | ||||
| 		self.blocks_blooms.write().unwrap().squeeze(size.blocks_blooms); | ||||
| 	} | ||||
| 
 | ||||
| 	/// Let the cache system know that a cacheable item has been used.
 | ||||
| 	fn note_used(&self, id: CacheID) { | ||||
| 		let mut cache_man = self.cache_man.write().unwrap(); | ||||
| 		if !cache_man.cache_usage[0].contains(&id) { | ||||
| 			cache_man.cache_usage[0].insert(id.clone()); | ||||
| 			if cache_man.in_use.contains(&id) { | ||||
| 				if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e|e.contains(&id)) { | ||||
| 					c.remove(&id); | ||||
| 				} | ||||
| 			} else { | ||||
| 				cache_man.in_use.insert(id); | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// Ticks our cache system and throws out any old data.
 | ||||
| 	pub fn collect_garbage(&self, force: bool) { | ||||
| 		// TODO: check time.
 | ||||
| 		let timeout = true; | ||||
| 
 | ||||
| 		let t = self.cache_size().total(); | ||||
| 		if t < MIN_CACHE_SIZE || (!timeout && (!force || t < MAX_CACHE_SIZE)) { return; } | ||||
| 
 | ||||
| 		let mut cache_man = self.cache_man.write().unwrap(); | ||||
| 		let mut blocks = self.blocks.write().unwrap(); | ||||
| 		let mut block_details = self.block_details.write().unwrap(); | ||||
| 		let mut block_hashes = self.block_hashes.write().unwrap(); | ||||
| 		let mut transaction_addresses = self.transaction_addresses.write().unwrap(); | ||||
| 		let mut block_logs = self.block_logs.write().unwrap(); | ||||
| 		let mut blocks_blooms = self.blocks_blooms.write().unwrap(); | ||||
| 
 | ||||
| 		for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { | ||||
| 			cache_man.in_use.remove(&id); | ||||
| 			match id { | ||||
| 				CacheID::Block(h) => { blocks.remove(&h); }, | ||||
| 				CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); }, | ||||
| 				CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); }, | ||||
| 				CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); }, | ||||
| 				CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); }, | ||||
| 				_ => panic!(), | ||||
| 			} | ||||
| 		} | ||||
| 		cache_man.cache_usage.push_front(HashSet::new()); | ||||
| 
 | ||||
| 		// TODO: handle block_hashes properly.
 | ||||
| 		block_hashes.clear(); | ||||
| 
 | ||||
| 		// TODO: m_lastCollection = chrono::system_clock::now();
 | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
|  | ||||
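Aside on the cache bookkeeping added above: `note_used` promotes every touched `CacheID` into the newest of `COLLECTION_QUEUE_SIZE` usage generations, and `collect_garbage` evicts the oldest generation wholesale, clearing the corresponding entries from the per-kind maps. A simplified, hypothetical sketch of that generational scheme, detached from the `RwLock`s and the concrete caches (the type and method names below are illustrative):

use std::collections::{HashSet, VecDeque};
use std::hash::Hash;

struct CacheManager<Id: Hash + Eq + Clone> {
    generations: VecDeque<HashSet<Id>>, // front = newest generation, back = oldest
    in_use: HashSet<Id>,                // every ID currently held in some generation
}

impl<Id: Hash + Eq + Clone> CacheManager<Id> {
    fn new(queue_size: usize) -> Self {
        CacheManager {
            generations: (0..queue_size).map(|_| HashSet::new()).collect(),
            in_use: HashSet::new(),
        }
    }

    // Promote an ID into the newest generation, dropping any stale entry for it.
    fn note_used(&mut self, id: Id) {
        if !self.generations[0].contains(&id) {
            self.generations[0].insert(id.clone());
            if self.in_use.contains(&id) {
                if let Some(g) = self.generations.iter_mut().skip(1).find(|g| g.contains(&id)) {
                    g.remove(&id);
                }
            } else {
                self.in_use.insert(id);
            }
        }
    }

    // Retire the oldest generation and return the IDs whose cached data may be evicted.
    fn collect(&mut self) -> HashSet<Id> {
        let evicted = self.generations.pop_back().unwrap_or_default();
        for id in &evicted {
            self.in_use.remove(id);
        }
        self.generations.push_front(HashSet::new());
        evicted
    }
}

The appeal of the scheme is that eviction needs no per-entry timestamps: anything not touched for `queue_size` collection rounds simply ages out.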
| @ -63,6 +63,7 @@ impl Builtin { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn copy_to(src: &[u8], dest: &mut[u8]) { | ||||
| 	// NICE: optimise
 | ||||
| 	for i in 0..min(src.len(), dest.len()) { | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| use util::*; | ||||
| use rocksdb::{Options, DB}; | ||||
| use blockchain::{BlockChain, BlockProvider}; | ||||
| use blockchain::{BlockChain, BlockProvider, CacheSize}; | ||||
| use views::BlockView; | ||||
| use error::*; | ||||
| use header::BlockNumber; | ||||
| @ -40,12 +40,20 @@ pub struct BlockChainInfo { | ||||
| 	pub best_block_number: BlockNumber | ||||
| } | ||||
| 
 | ||||
| impl fmt::Display for BlockChainInfo { | ||||
| 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||||
| 		write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// Block queue status
 | ||||
| #[derive(Debug)] | ||||
| pub struct BlockQueueStatus { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub full: bool, | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub type TreeRoute = ::blockchain::TreeRoute; | ||||
| 
 | ||||
| /// Blockchain database client. Owns and manages a blockchain and a block queue.
 | ||||
| @ -99,14 +107,37 @@ pub trait BlockChainClient : Sync + Send { | ||||
| 	fn chain_info(&self) -> BlockChainInfo; | ||||
| } | ||||
| 
 | ||||
| #[derive(Default, Clone, Debug, Eq, PartialEq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct ClientReport { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub blocks_imported: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub transactions_applied: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub gas_processed: U256, | ||||
| } | ||||
| 
 | ||||
| impl ClientReport { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn accrue_block(&mut self, block: &PreVerifiedBlock) { | ||||
| 		self.blocks_imported += 1; | ||||
| 		self.transactions_applied += block.transactions.len(); | ||||
| 		self.gas_processed += block.header.gas_used; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
 | ||||
| pub struct Client { | ||||
| 	chain: Arc<RwLock<BlockChain>>, | ||||
| 	engine: Arc<Box<Engine>>, | ||||
| 	state_db: OverlayDB, | ||||
| 	state_db: JournalDB, | ||||
| 	queue: BlockQueue, | ||||
| 	report: ClientReport, | ||||
| } | ||||
| 
 | ||||
| const HISTORY: u64 = 1000; | ||||
| 
 | ||||
| impl Client { | ||||
| 	/// Create a new client with given spec and DB path.
 | ||||
| 	pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Client, Error> { | ||||
| @ -114,9 +145,7 @@ impl Client { | ||||
| 		let mut opts = Options::new(); | ||||
| 		opts.set_max_open_files(256); | ||||
| 		opts.create_if_missing(true); | ||||
| 		/* | ||||
| 		opts.set_max_open_files(256); | ||||
| 		opts.set_use_fsync(false); | ||||
| 		/*opts.set_use_fsync(false);
 | ||||
| 		opts.set_bytes_per_sync(8388608); | ||||
| 		opts.set_disable_data_sync(false); | ||||
| 		opts.set_block_cache_size_mb(1024); | ||||
| @ -131,17 +160,17 @@ impl Client { | ||||
| 		opts.set_max_background_compactions(4); | ||||
| 		opts.set_max_background_flushes(4); | ||||
| 		opts.set_filter_deletes(false); | ||||
| 		opts.set_disable_auto_compactions(true);		
 | ||||
| 		*/ | ||||
| 		opts.set_disable_auto_compactions(false);*/ | ||||
| 
 | ||||
| 		let mut state_path = path.to_path_buf(); | ||||
| 		state_path.push("state"); | ||||
| 		let db = DB::open(&opts, state_path.to_str().unwrap()).unwrap(); | ||||
| 		let mut state_db = OverlayDB::new(db); | ||||
| 		let mut state_db = JournalDB::new(db); | ||||
| 		
 | ||||
| 		let engine = Arc::new(try!(spec.to_engine())); | ||||
| 		engine.spec().ensure_db_good(&mut state_db); | ||||
| 		state_db.commit().expect("Error commiting genesis state to state DB"); | ||||
| 		if engine.spec().ensure_db_good(&mut state_db) { | ||||
| 			state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); | ||||
| 		} | ||||
| 
 | ||||
| //		chain.write().unwrap().ensure_good(&state_db);
 | ||||
| 
 | ||||
| @ -150,6 +179,7 @@ impl Client { | ||||
| 			engine: engine.clone(), | ||||
| 			state_db: state_db, | ||||
| 			queue: BlockQueue::new(engine, message_channel), | ||||
| 			report: Default::default(), | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| @ -214,16 +244,33 @@ impl Client { | ||||
| 			} | ||||
| 
 | ||||
| 			self.chain.write().unwrap().insert_block(&block.bytes); //TODO: err here?
 | ||||
| 			match result.drain().commit() { | ||||
| 			let ancient = if header.number() >= HISTORY { Some(header.number() - HISTORY) } else { None }; | ||||
| 			match result.drain().commit(header.number(), &header.hash(), ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap()))) { | ||||
| 				Ok(_) => (), | ||||
| 				Err(e) => { | ||||
| 					warn!(target: "client", "State DB commit failed: {:?}", e); | ||||
| 					return; | ||||
| 				} | ||||
| 			} | ||||
| 			//info!(target: "client", "Imported #{} ({})", header.number(), header.hash());
 | ||||
| 			self.report.accrue_block(&block); | ||||
| 			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// Get info on the cache.
 | ||||
| 	pub fn cache_info(&self) -> CacheSize { | ||||
| 		self.chain.read().unwrap().cache_size() | ||||
| 	} | ||||
| 
 | ||||
| 	/// Get the report.
 | ||||
| 	pub fn report(&self) -> ClientReport { | ||||
| 		self.report.clone() | ||||
| 	} | ||||
| 
 | ||||
| 	/// Tick the client.
 | ||||
| 	pub fn tick(&self) { | ||||
| 		self.chain.read().unwrap().collect_garbage(false); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| impl BlockChainClient for Client { | ||||
|  | ||||
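Aside on the `OverlayDB` to `JournalDB` switch above: each imported block now commits its state overlay under `(block number, block hash)`, and once the chain is more than `HISTORY` (1000) blocks long, the era that falls out of that window is handed to `commit` for purging. A tiny sketch of just the window arithmetic, with `era_to_purge` as a hypothetical stand-in for the inline `ancient` computation above:

const JOURNAL_HISTORY: u64 = 1000;

/// Which journal era, if any, leaves the pruning window when block `number` is committed.
fn era_to_purge(number: u64) -> Option<u64> {
    if number >= JOURNAL_HISTORY { Some(number - JOURNAL_HISTORY) } else { None }
}

fn main() {
    assert_eq!(era_to_purge(0), None);         // genesis commit keeps everything
    assert_eq!(era_to_purge(999), None);       // still inside the 1000-block window
    assert_eq!(era_to_purge(1500), Some(500)); // block 1500 lets era 500 be purged
}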
| @ -31,11 +31,14 @@ pub trait Engine : Sync + Send { | ||||
| 
 | ||||
| 	/// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`.
 | ||||
| 	fn maximum_extra_data_size(&self) -> usize { decode(&self.spec().engine_params.get("maximumExtraDataSize").unwrap()) } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn maximum_uncle_count(&self) -> usize { 2 } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn account_start_nonce(&self) -> U256 { decode(&self.spec().engine_params.get("accountStartNonce").unwrap()) } | ||||
| 
 | ||||
| 	/// Block transformation functions, before and after the transactions.
 | ||||
| 	fn on_new_block(&self, _block: &mut Block) {} | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn on_close_block(&self, _block: &mut Block) {} | ||||
| 
 | ||||
| 	// TODO: consider including State in the params for verification functions.
 | ||||
| @ -55,6 +58,7 @@ pub trait Engine : Sync + Send { | ||||
| 	// TODO: Add flags for which bits of the transaction to check.
 | ||||
| 	// TODO: consider including State in the params.
 | ||||
| 	fn verify_transaction_basic(&self, _t: &Transaction, _header: &Header) -> Result<(), Error> { Ok(()) } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn verify_transaction(&self, _t: &Transaction, _header: &Header) -> Result<(), Error> { Ok(()) } | ||||
| 
 | ||||
| 	/// Don't forget to call Super::populateFromParent when subclassing & overriding.
 | ||||
| @ -63,8 +67,11 @@ pub trait Engine : Sync + Send { | ||||
| 
 | ||||
| 	// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
 | ||||
| 	// from Spec into here and removing the Spec::builtins field.
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn is_builtin(&self, a: &Address) -> bool { self.spec().builtins.contains_key(a) } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.spec().builtins.get(a).unwrap().cost(input.len()) } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.spec().builtins.get(a).unwrap().execute(input, output); } | ||||
| 
 | ||||
| 	// TODO: sealing stuff - though might want to leave this for later.
 | ||||
|  | ||||
| @ -25,6 +25,7 @@ pub struct EnvInfo { | ||||
| } | ||||
| 
 | ||||
| impl EnvInfo { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub fn new() -> EnvInfo { | ||||
| 		EnvInfo { | ||||
| 			number: 0, | ||||
|  | ||||
							
								
								
									
src/error.rs | 74
							| @ -5,15 +5,22 @@ use header::BlockNumber; | ||||
| use basic_types::LogBloom; | ||||
| 
 | ||||
| #[derive(Debug, PartialEq, Eq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct Mismatch<T: fmt::Debug> { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub expected: T, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub found: T, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, PartialEq, Eq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct OutOfBounds<T: fmt::Debug> { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub min: Option<T>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub max: Option<T>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub found: T, | ||||
| } | ||||
| 
 | ||||
| @ -22,58 +29,112 @@ pub struct OutOfBounds<T: fmt::Debug> { | ||||
| pub enum ExecutionError { | ||||
| 	/// Returned when there gas paid for transaction execution is
 | ||||
| 	/// lower than base gas required.
 | ||||
| 	NotEnoughBaseGas { required: U256, got: U256 }, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	NotEnoughBaseGas { 
 | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		required: U256, 
 | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		got: U256 | ||||
| 	}, | ||||
| 	/// Returned when block (gas_used + gas) > gas_limit.
 | ||||
| 	/// 
 | ||||
| 	/// If gas =< gas_limit, upstream may try to execute the transaction
 | ||||
| 	/// in next block.
 | ||||
| 	BlockGasLimitReached { gas_limit: U256, gas_used: U256, gas: U256 }, | ||||
| 	BlockGasLimitReached { 
 | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		gas_limit: U256, | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		gas_used: U256, | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		gas: U256 
 | ||||
| 	}, | ||||
| 	/// Returned when transaction nonce does not match state nonce.
 | ||||
| 	InvalidNonce { expected: U256, got: U256 }, | ||||
| 	InvalidNonce { 
 | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		expected: U256, | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		got: U256 | ||||
| 	}, | ||||
| 	/// Returned when cost of transaction (value + gas_price * gas) exceeds 
 | ||||
| 	/// current sender balance.
 | ||||
| 	NotEnoughCash { required: U512, got: U512 }, | ||||
| 	NotEnoughCash { 
 | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		required: U512, | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		got: U512 | ||||
| 	}, | ||||
| 	/// Returned when internal evm error occurs.
 | ||||
| 	Internal | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub enum TransactionError { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	InvalidGasLimit(OutOfBounds<U256>), | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug, PartialEq, Eq)] | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub enum BlockError { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	TooManyUncles(OutOfBounds<usize>), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	UncleWrongGeneration, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	ExtraDataOutOfBounds(OutOfBounds<usize>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidSealArity(Mismatch<usize>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	TooMuchGasUsed(OutOfBounds<U256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidUnclesHash(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	UncleTooOld(OutOfBounds<BlockNumber>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	UncleIsBrother(OutOfBounds<BlockNumber>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	UncleInChain(H256), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	UncleParentNotInChain(H256), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidStateRoot(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidGasUsed(Mismatch<U256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidTransactionsRoot(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidDifficulty(Mismatch<U256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidGasLimit(OutOfBounds<U256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidReceiptsStateRoot(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidTimestamp(OutOfBounds<u64>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidLogBloom(Mismatch<LogBloom>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidEthashDifficulty(Mismatch<U256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidBlockNonce(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidParentHash(Mismatch<H256>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidNumber(OutOfBounds<BlockNumber>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	UnknownParent(H256), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	UnknownUncleParent(H256), | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub enum ImportError { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	Bad(Option<Error>), | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	AlreadyInChain, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	AlreadyQueued, | ||||
| } | ||||
| 
 | ||||
| @ -89,10 +150,15 @@ pub type ImportResult = Result<(), ImportError>; | ||||
| #[derive(Debug)] | ||||
| /// General error type which should be capable of representing all errors in ethcore.
 | ||||
| pub enum Error { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Util(UtilError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Block(BlockError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	UnknownEngineName(String), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Execution(ExecutionError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Transaction(TransactionError), | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1,17 +1,22 @@ | ||||
| use util::*; | ||||
| 
 | ||||
| #[inline] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub fn ether() -> U256 { U256::exp10(18) } | ||||
| 
 | ||||
| #[inline] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub fn finney() -> U256 { U256::exp10(15) } | ||||
| 
 | ||||
| #[inline] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub fn szabo() -> U256 { U256::exp10(12) } | ||||
| 
 | ||||
| #[inline] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub fn shannon() -> U256 { U256::exp10(9) } | ||||
| 
 | ||||
| #[inline] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub fn wei() -> U256 { U256::exp10(0) } | ||||
| 
 | ||||
|  | ||||
| @ -19,6 +19,7 @@ pub struct Ethash { | ||||
| } | ||||
| 
 | ||||
| impl Ethash { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn new_boxed(spec: Spec) -> Box<Engine> { | ||||
| 		Box::new(Ethash { | ||||
| 			spec: spec, | ||||
| @ -215,7 +216,7 @@ fn on_close_block() { | ||||
| 	use super::*; | ||||
| 	let engine = new_morden().to_engine().unwrap(); | ||||
| 	let genesis_header = engine.spec().genesis_header(); | ||||
| 	let mut db = OverlayDB::new_temp(); | ||||
| 	let mut db = JournalDB::new_temp(); | ||||
| 	engine.spec().ensure_db_good(&mut db); | ||||
| 	let last_hashes = vec![genesis_header.hash()]; | ||||
| 	let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); | ||||
| @ -228,7 +229,7 @@ fn on_close_block_with_uncle() { | ||||
| 	use super::*; | ||||
| 	let engine = new_morden().to_engine().unwrap(); | ||||
| 	let genesis_header = engine.spec().genesis_header(); | ||||
| 	let mut db = OverlayDB::new_temp(); | ||||
| 	let mut db = JournalDB::new_temp(); | ||||
| 	engine.spec().ensure_db_good(&mut db); | ||||
| 	let last_hashes = vec![genesis_header.hash()]; | ||||
| 	let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); | ||||
|  | ||||
| @ -3,7 +3,9 @@ | ||||
| //! Contains all Ethereum network specific stuff, such as denominations and
 | ||||
| //! consensus specifications.
 | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod ethash; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod denominations; | ||||
| 
 | ||||
| pub use self::ethash::*; | ||||
| @ -40,7 +42,7 @@ mod tests { | ||||
| 	fn ensure_db_good() { | ||||
| 		let engine = new_morden().to_engine().unwrap(); | ||||
| 		let genesis_header = engine.spec().genesis_header(); | ||||
| 		let mut db = OverlayDB::new_temp(); | ||||
| 		let mut db = JournalDB::new_temp(); | ||||
| 		engine.spec().ensure_db_good(&mut db); | ||||
| 		let s = State::from_existing(db.clone(), genesis_header.state_root.clone(), engine.account_start_nonce()); | ||||
| 		assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); | ||||
|  | ||||
| @ -15,23 +15,31 @@ pub enum Error { | ||||
| 	/// `BadJumpDestination` is returned when execution tried to move
 | ||||
| 	/// to position that wasn't marked with JUMPDEST instruction
 | ||||
| 	BadJumpDestination { | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		destination: usize | ||||
| 	}, | ||||
| 	/// `BadInstructions` is returned when given instruction is not supported
 | ||||
| 	BadInstruction { | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		instruction: u8, | ||||
| 	}, | ||||
| 	/// `StackUnderflow` when there is not enough stack elements to execute instruction
 | ||||
| 	/// First parameter says how many elements were needed and the second how many were actually on Stack
 | ||||
| 	StackUnderflow { | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		instruction: &'static str, | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		wanted: usize, 
 | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		on_stack: usize | ||||
| 	}, | ||||
| 	/// When execution would exceed defined Stack Limit
 | ||||
| 	OutOfStack { | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		instruction: &'static str, | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		wanted: usize, 
 | ||||
| 		/// TODO [Tomusdrw] Please document me
 | ||||
| 		limit: usize | ||||
| 	}, | ||||
| 	/// Returned on evm internal error. Should never be ignored during development.
 | ||||
|  | ||||
| @ -26,6 +26,7 @@ pub enum MessageCallResult { | ||||
| 	Failed | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Ext { | ||||
| 	/// Returns a value for given key.
 | ||||
| 	fn storage_at(&self, key: &H256) -> H256; | ||||
|  | ||||
| @ -3,8 +3,11 @@ use std::fmt; | ||||
| use evm::Evm; | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| /// TODO [Tomusdrw] Please document me
 | ||||
| pub enum VMType { | ||||
| 	/// TODO [Tomusdrw] Please document me
 | ||||
| 	Jit, | ||||
| 	/// TODO [Tomusdrw] Please document me
 | ||||
| 	Interpreter | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -4,7 +4,6 @@ use common::*; | ||||
| use evm; | ||||
| use super::instructions as instructions; | ||||
| use super::instructions::Instruction; | ||||
| use std::num::wrapping::OverflowingOps; | ||||
| use std::marker::Copy; | ||||
| use evm::{MessageCallResult, ContractCreateResult}; | ||||
| 
 | ||||
|  | ||||
| @ -2,6 +2,7 @@ | ||||
| 
 | ||||
| pub mod ext; | ||||
| pub mod evm; | ||||
| /// TODO [Tomusdrw] Please document me
 | ||||
| pub mod interpreter; | ||||
| #[macro_use] | ||||
| pub mod factory; | ||||
|  | ||||
| @ -2,36 +2,67 @@ | ||||
| 
 | ||||
| /// Definition of the cost schedule and other parameterisations for the EVM.
 | ||||
| pub struct Schedule { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub exceptional_failed_code_deposit: bool, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub have_delegate_call: bool, | ||||
| 	/// TODO [Tomusdrw] Please document me
 | ||||
| 	pub stack_limit: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub max_depth: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub tier_step_gas: [usize; 8], | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub exp_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub exp_byte_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sha3_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sha3_word_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sload_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sstore_set_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sstore_reset_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub sstore_refund_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub jumpdest_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub log_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub log_data_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub log_topic_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub create_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub call_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub call_stipend: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub call_value_transfer_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub call_new_account_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub suicide_refund_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub memory_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub quad_coeff_div: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub create_data_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub tx_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub tx_create_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub tx_data_zero_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub tx_data_non_zero_gas: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub copy_gas: usize, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -3,17 +3,23 @@ use header::BlockNumber; | ||||
| use rocksdb::{DB, Writable}; | ||||
| 
 | ||||
| /// Represents index of extra data in database
 | ||||
| #[derive(Copy, Clone)] | ||||
| #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] | ||||
| pub enum ExtrasIndex { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	BlockDetails = 0, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	BlockHash = 1, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	TransactionAddress = 2, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	BlockLogBlooms = 3, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	BlocksBlooms = 4 | ||||
| } 
 | ||||
| 
 | ||||
| /// trait used to write Extras data to db
 | ||||
| pub trait ExtrasWritable { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn put_extras<K, T>(&self, hash: &K, value: &T) where | ||||
| 		T: ExtrasIndexable + Encodable, 
 | ||||
| 		K: ExtrasSliceConvertable; | ||||
| @ -21,10 +27,12 @@ pub trait ExtrasWritable { | ||||
| 
 | ||||
| /// trait used to read Extras data from db
 | ||||
| pub trait ExtrasReadable { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn get_extras<K, T>(&self, hash: &K) -> Option<T> where | ||||
| 		T: ExtrasIndexable + Decodable, | ||||
| 		K: ExtrasSliceConvertable; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn extras_exists<K, T>(&self, hash: &K) -> bool where | ||||
| 		T: ExtrasIndexable, | ||||
| 		K: ExtrasSliceConvertable; | ||||
| @ -58,7 +66,10 @@ impl ExtrasReadable for DB { | ||||
| 
 | ||||
| /// Implementations should convert arbitrary type to database key slice
 | ||||
| pub trait ExtrasSliceConvertable { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn to_extras_slice(&self, i: ExtrasIndex) -> H264; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn as_h256(&self) -> Option<&H256> { None } | ||||
| } | ||||
| 
 | ||||
| impl ExtrasSliceConvertable for H256 { | ||||
| @ -67,6 +78,7 @@ impl ExtrasSliceConvertable for H256 { | ||||
| 		slice[32] = i as u8; | ||||
| 		slice | ||||
| 	} | ||||
| 	fn as_h256(&self) -> Option<&H256> { Some(self) } | ||||
| } | ||||
| 
 | ||||
| impl ExtrasSliceConvertable for U256 { | ||||
| @ -84,6 +96,7 @@ impl ExtrasSliceConvertable for BlockNumber { | ||||
| 
 | ||||
| /// Types implementing this trait can be indexed in extras database
 | ||||
| pub trait ExtrasIndexable { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn extras_index() -> ExtrasIndex; | ||||
| } | ||||
| 
 | ||||
| @ -96,9 +109,13 @@ impl ExtrasIndexable for H256 { | ||||
| /// Familial details concerning a block
 | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct BlockDetails { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub number: BlockNumber, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub total_difficulty: U256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub parent: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub children: Vec<H256> | ||||
| } | ||||
| 
 | ||||
| @ -141,6 +158,7 @@ impl Encodable for BlockDetails { | ||||
| /// Log blooms of certain block
 | ||||
| #[derive(Clone)] | ||||
| pub struct BlockLogBlooms { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub blooms: Vec<H2048> | ||||
| } | ||||
| 
 | ||||
| @ -174,6 +192,7 @@ impl Encodable for BlockLogBlooms { | ||||
| 
 | ||||
| /// Neighboring log blooms on certain level
 | ||||
| pub struct BlocksBlooms { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub blooms: [H2048; 16] | ||||
| } | ||||
| 
 | ||||
| @ -221,7 +240,9 @@ impl Encodable for BlocksBlooms { | ||||
| /// Represents address of certain transaction within block
 | ||||
| #[derive(Clone)] | ||||
| pub struct TransactionAddress { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub block_hash: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub index: u64 | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -2,6 +2,7 @@ use util::*; | ||||
| use basic_types::*; | ||||
| use time::now_utc; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub type BlockNumber = u64; | ||||
| 
 | ||||
| /// A block header.
 | ||||
| @ -13,25 +14,41 @@ pub type BlockNumber = u64; | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct Header { | ||||
| 	// TODO: make all private.
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub parent_hash: H256, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub timestamp: u64, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub number: BlockNumber, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub author: Address, | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub transactions_root: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub uncles_hash: H256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub extra_data: Bytes, | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub state_root: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub receipts_root: H256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub log_bloom: LogBloom, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub gas_used: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub gas_limit: U256, | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub difficulty: U256, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub seal: Vec<Bytes>, | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub hash: RefCell<Option<H256>>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub bare_hash: RefCell<Option<H256>>, | ||||
| } | ||||
| 
 | ||||
| @ -61,32 +78,50 @@ impl Header { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn number(&self) -> BlockNumber { self.number } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn timestamp(&self) -> u64 { self.timestamp } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn author(&self) -> &Address { &self.author } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn extra_data(&self) -> &Bytes { &self.extra_data } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn state_root(&self) -> &H256 { &self.state_root } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn receipts_root(&self) -> &H256 { &self.receipts_root } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn gas_limit(&self) -> &U256 { &self.gas_limit } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn difficulty(&self) -> &U256 { &self.difficulty } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn seal(&self) -> &Vec<Bytes> { &self.seal } | ||||
| 
 | ||||
| 	// TODO: seal_at, set_seal_at &c.
 | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_timestamp_now(&mut self) { self.timestamp = now_utc().to_timespec().sec as u64; self.note_dirty(); } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_author(&mut self, a: Address) { if a != self.author { self.author = a; self.note_dirty(); } } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_extra_data(&mut self, a: Bytes) { if a != self.extra_data { self.extra_data = a; self.note_dirty(); } } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_gas_used(&mut self, a: U256) { self.gas_used = a; self.note_dirty(); } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_gas_limit(&mut self, a: U256) { self.gas_limit = a; self.note_dirty(); } | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_difficulty(&mut self, a: U256) { self.difficulty = a; self.note_dirty(); } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn set_seal(&mut self, a: Vec<Bytes>) { self.seal = a; self.note_dirty(); } | ||||
| 
 | ||||
| 	/// Get the hash of this header (sha3 of the RLP).
 | ||||
| @ -120,6 +155,7 @@ impl Header { | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO: make these functions traity 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn stream_rlp(&self, s: &mut RlpStream, with_seal: Seal) { | ||||
| 		s.append_list(13 + match with_seal { Seal::With => self.seal.len(), _ => 0 }); | ||||
| 		s.append(&self.parent_hash); | ||||
| @ -141,12 +177,14 @@ impl Header { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn rlp(&self, with_seal: Seal) -> Bytes { | ||||
| 		let mut s = RlpStream::new(); | ||||
| 		self.stream_rlp(&mut s, with_seal); | ||||
| 		s.out() | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub fn rlp_sha3(&self, with_seal: Seal) -> H256 { self.rlp(with_seal).sha3() } | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
src/lib.rs: 27 lines changed
| @ -1,6 +1,6 @@ | ||||
| #![warn(missing_docs)] | ||||
| #![feature(cell_extras)] | ||||
| #![feature(augmented_assignments)] | ||||
| #![feature(wrapping)] | ||||
| //#![feature(plugin)]
 | ||||
| //#![plugin(interpolate_idents)]
 | ||||
| //! Ethcore's ethereum implementation
 | ||||
| @ -89,31 +89,51 @@ extern crate evmjit; | ||||
| #[macro_use] | ||||
| extern crate ethcore_util as util; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod common; | ||||
| /// TODO [Tomusdrw] Please document me
 | ||||
| pub mod basic_types; | ||||
| #[macro_use] | ||||
| pub mod evm; | ||||
| pub mod error; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod log_entry; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod env_info; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod pod_account; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod pod_state; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod account_diff; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod state_diff; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod engine; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod state; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod account; | ||||
| pub mod action_params; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod header; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod transaction; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod receipt; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod null_engine; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod builtin; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod spec; | ||||
| pub mod views; | ||||
| pub mod blockchain; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod extras; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod substate; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod service; | ||||
| pub mod executive; | ||||
| pub mod externalities; | ||||
| @ -121,9 +141,14 @@ pub mod externalities; | ||||
| #[cfg(test)] | ||||
| mod tests; | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod client; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod sync; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod block; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod verification; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod queue; | ||||
| pub mod ethereum; | ||||
|  | ||||
| @ -4,8 +4,11 @@ use basic_types::LogBloom; | ||||
| /// A single log's entry.
 | ||||
| #[derive(Debug,PartialEq,Eq)] | ||||
| pub struct LogEntry { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub address: Address, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub topics: Vec<H256>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub data: Bytes, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -11,6 +11,7 @@ pub struct NullEngine { | ||||
| } | ||||
| 
 | ||||
| impl NullEngine { | ||||
| 	/// TODO [Tomusdrw] Please document me
 | ||||
| 	pub fn new_boxed(spec: Spec) -> Box<Engine> { | ||||
| 		Box::new(NullEngine{ | ||||
| 			spec: spec, | ||||
|  | ||||
| @ -4,9 +4,13 @@ use account::*; | ||||
| #[derive(Debug,Clone,PartialEq,Eq)] | ||||
| /// Genesis account data. Does not have a DB overlay cache.
 | ||||
| pub struct PodAccount { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub balance: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub nonce: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub code: Bytes, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub storage: BTreeMap<H256, H256>, | ||||
| } | ||||
| 
 | ||||
| @ -27,6 +31,7 @@ impl PodAccount { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn rlp(&self) -> Bytes { | ||||
| 		let mut stream = RlpStream::new_list(4); | ||||
| 		stream.append(&self.nonce); | ||||
|  | ||||
| @ -2,6 +2,7 @@ use util::*; | ||||
| use pod_account::*; | ||||
| 
 | ||||
| #[derive(Debug,Clone,PartialEq,Eq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct PodState (BTreeMap<Address, PodAccount>); | ||||
| 
 | ||||
| impl PodState { | ||||
|  | ||||
| @ -204,6 +204,7 @@ impl BlockQueue { | ||||
| 		verification.verified = new_verified; | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> { | ||||
| 		let mut verification = self.verification.lock().unwrap(); | ||||
| 		let count = min(max, verification.verified.len()); | ||||
|  | ||||
| @ -5,13 +5,18 @@ use log_entry::LogEntry; | ||||
| /// Information describing execution of a transaction.
 | ||||
| #[derive(Debug)] | ||||
| pub struct Receipt { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub state_root: H256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub gas_used: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub log_bloom: LogBloom, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub logs: Vec<LogEntry>, | ||||
| } | ||||
| 
 | ||||
| impl Receipt { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn new(state_root: H256, gas_used: U256, logs: Vec<LogEntry>) -> Receipt { | ||||
| 		Receipt { | ||||
| 			state_root: state_root, | ||||
|  | ||||
| @ -33,10 +33,12 @@ impl ClientService { | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn io(&mut self) -> &mut IoService<NetSyncMessage> { | ||||
| 		self.net_service.io() | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn client(&self) -> Arc<RwLock<Client>> { | ||||
| 		self.client.clone() | ||||
| 	} | ||||
| @ -47,8 +49,18 @@ struct ClientIoHandler { | ||||
| 	client: Arc<RwLock<Client>> | ||||
| } | ||||
| 
 | ||||
| const CLIENT_TICK_TIMER: TimerToken = 0; | ||||
| const CLIENT_TICK_MS: u64 = 5000; | ||||
| 
 | ||||
| impl IoHandler<NetSyncMessage> for ClientIoHandler { | ||||
| 	fn initialize(&self, _io: &IoContext<NetSyncMessage>) { | ||||
| 	fn initialize(&self, io: &IoContext<NetSyncMessage>) { | ||||
| 		io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK_MS).expect("Error registering client timer"); | ||||
| 	} | ||||
| 
 | ||||
| 	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) { | ||||
| 		if timer == CLIENT_TICK_TIMER { | ||||
| 			self.client.read().unwrap().tick(); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	fn message(&self, _io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) { | ||||
|  | ||||
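The ClientIoHandler change above introduces the timer pattern used by the IO layer: a handler registers a token with register_timer() during initialize() and then receives timeout() callbacks at the requested interval. Below is a minimal sketch of the same pattern for a hypothetical handler; the token, interval and struct name are illustrative, and the IoHandler/IoContext signatures are taken from the hunk above rather than verified against the full trait.

    // Hypothetical periodic handler, mirroring ClientIoHandler above (sketch only).
    const STATUS_TIMER: TimerToken = 1;   // illustrative token, distinct from CLIENT_TICK_TIMER
    const STATUS_MS: u64 = 10_000;        // illustrative 10 s interval

    struct StatusHandler;

    impl IoHandler<NetSyncMessage> for StatusHandler {
    	fn initialize(&self, io: &IoContext<NetSyncMessage>) {
    		io.register_timer(STATUS_TIMER, STATUS_MS).expect("Error registering status timer");
    	}

    	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) {
    		if timer == STATUS_TIMER {
    			// periodic work goes here, analogous to client.tick()
    		}
    	}
    }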
							
								
								
									
src/spec.rs: 21 lines changed
| @ -51,6 +51,7 @@ pub struct GenesisAccount { | ||||
| } | ||||
| 
 | ||||
| impl GenesisAccount { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn rlp(&self) -> Bytes { | ||||
| 		let mut stream = RlpStream::new_list(4); | ||||
| 		stream.append(&self.nonce); | ||||
| @ -66,27 +67,41 @@ impl GenesisAccount { | ||||
| #[derive(Debug)] | ||||
| pub struct Spec { | ||||
| 	// User friendly spec name
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub name: String, | ||||
| 	// What engine are we using for this?
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub engine_name: String, | ||||
| 
 | ||||
| 	// Parameters concerning operation of the specific engine we're using.
 | ||||
| 	// Name -> RLP-encoded value
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub engine_params: HashMap<String, Bytes>, | ||||
| 
 | ||||
| 	// Builtin-contracts are here for now but would like to abstract into Engine API eventually.
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub builtins: HashMap<Address, Builtin>, | ||||
| 
 | ||||
| 	// Genesis params.
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub parent_hash: H256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub author: Address, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub difficulty: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub gas_limit: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub gas_used: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub timestamp: u64, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub extra_data: Bytes, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub genesis_state: HashMap<Address, GenesisAccount>, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub seal_fields: usize, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub seal_rlp: Bytes, | ||||
| 
 | ||||
| 	// May be prepopulated if we know this in advance.
 | ||||
| @ -112,6 +127,7 @@ impl Spec { | ||||
| 		self.state_root_memo.read().unwrap().as_ref().unwrap().clone() | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn genesis_header(&self) -> Header { | ||||
| 		Header { | ||||
| 			parent_hash: self.parent_hash.clone(), | ||||
| @ -220,7 +236,7 @@ impl FromJson for Spec { | ||||
| 
 | ||||
| impl Spec { | ||||
| 	/// Ensure that the given state DB has the trie nodes in for the genesis state.
 | ||||
| 	pub fn ensure_db_good(&self, db: &mut HashDB) { | ||||
| 	pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { | ||||
| 		if !db.contains(&self.state_root()) { | ||||
| 			info!("Populating genesis state..."); | ||||
| 			let mut root = H256::new(); 
 | ||||
| @ -232,7 +248,8 @@ impl Spec { | ||||
| 			} | ||||
| 			assert!(db.contains(&self.state_root())); | ||||
| 			info!("Genesis state is ready"); | ||||
| 		} | ||||
| 			true | ||||
| 		} else { false } | ||||
| 	} | ||||
| 
 | ||||
| 	/// Create a new Spec from a JSON UTF-8 data resource `data`.
 | ||||
|  | ||||
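With the change above, ensure_db_good() reports whether it actually had to write the genesis state instead of returning nothing. A minimal sketch of how a caller might use the flag follows; the helper function and the decision about what to do on a fresh write are assumptions, not taken from this diff.

    // Sketch: prepare a state database for `spec` (hypothetical helper).
    fn prepare_genesis(spec: &Spec, db: &mut HashDB) {
    	if spec.ensure_db_good(db) {
    		// The genesis trie nodes were just inserted; a caller would typically
    		// persist them at this point (how is not shown in this diff).
    		info!("Genesis state written to a fresh database");
    	} else {
    		info!("Genesis state already present");
    	}
    }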
							
								
								
									
src/state.rs: 14 lines changed
| @ -5,12 +5,13 @@ use pod_account::*; | ||||
| use pod_state::*; | ||||
| use state_diff::*; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub type ApplyResult = Result<Receipt, Error>; | ||||
| 
 | ||||
| /// Representation of the entire state of all accounts in the system.
 | ||||
| #[derive(Clone)] | ||||
| pub struct State { | ||||
| 	db: OverlayDB, | ||||
| 	db: JournalDB, | ||||
| 	root: H256, | ||||
| 	cache: RefCell<HashMap<Address, Option<Account>>>, | ||||
| 
 | ||||
| @ -19,7 +20,7 @@ pub struct State { | ||||
| 
 | ||||
| impl State { | ||||
| 	/// Creates new state with empty state root
 | ||||
| 	pub fn new(mut db: OverlayDB, account_start_nonce: U256) -> State { | ||||
| 	pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State { | ||||
| 		let mut root = H256::new(); | ||||
| 		{ | ||||
| 			// init trie and reset root to null
 | ||||
| @ -35,7 +36,7 @@ impl State { | ||||
| 	} | ||||
| 
 | ||||
| 	/// Creates new state with existing state root
 | ||||
| 	pub fn from_existing(db: OverlayDB, root: H256, account_start_nonce: U256) -> State { | ||||
| 	pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State { | ||||
| 		{ | ||||
| 			// trie should panic! if root does not exist
 | ||||
| 			let _ = SecTrieDB::new(&db, &root); | ||||
| @ -51,11 +52,11 @@ impl State { | ||||
| 
 | ||||
| 	/// Create temporary state object
 | ||||
| 	pub fn new_temp() -> State { | ||||
| 		Self::new(OverlayDB::new_temp(), U256::from(0u8)) | ||||
| 		Self::new(JournalDB::new_temp(), U256::from(0u8)) | ||||
| 	} | ||||
| 
 | ||||
| 	/// Destroy the current object and return root and database.
 | ||||
| 	pub fn drop(self) -> (H256, OverlayDB) { | ||||
| 	pub fn drop(self) -> (H256, JournalDB) { | ||||
| 		(self.root, self.db) | ||||
| 	} | ||||
| 
 | ||||
| @ -65,7 +66,7 @@ impl State { | ||||
| 	} | ||||
| 
 | ||||
| 	/// Expose the underlying database; good to use for calling `state.db().commit()`.
 | ||||
| 	pub fn db(&mut self) -> &mut OverlayDB { | ||||
| 	pub fn db(&mut self) -> &mut JournalDB { | ||||
| 		&mut self.db | ||||
| 	} | ||||
| 
 | ||||
| @ -157,6 +158,7 @@ impl State { | ||||
| 		Ok(receipt) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub fn revert(&mut self, backup: State) { | ||||
| 		self.cache = backup.cache; | ||||
| 	} | ||||
|  | ||||
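State now wraps a JournalDB rather than an OverlayDB, so state writes are journaled per era and stale trie nodes can later be purged. A minimal usage sketch built from the constructors shown above and JournalDB::commit() from the new util/src/journaldb.rs; the era number and block id are illustrative.

    // Sketch: build a throwaway state and journal its first block (illustrative values).
    fn demo_state() {
    	let mut state = State::new(JournalDB::new_temp(), U256::from(0u8));
    	// ... apply transactions and flush account changes into the trie
    	//     (that step is outside this diff) ...
    	let (_root, mut jdb) = state.drop();
    	// Journal this block's inserts/removes under era 0 with an illustrative id;
    	// `None` means no old era is being finalised yet.
    	jdb.commit(0, &b"block0".sha3(), None).unwrap();
    }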
| @ -3,6 +3,7 @@ use pod_state::*; | ||||
| use account_diff::*; | ||||
| 
 | ||||
| #[derive(Debug,Clone,PartialEq,Eq)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct StateDiff (BTreeMap<Address, AccountDiff>); | ||||
| 
 | ||||
| impl StateDiff { | ||||
|  | ||||
| @ -25,6 +25,7 @@ impl Substate { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn accrue(&mut self, s: Substate) { | ||||
| 		self.suicides.extend(s.suicides.into_iter()); | ||||
| 		self.logs.extend(s.logs.into_iter()); | ||||
|  | ||||
| @ -475,7 +475,7 @@ impl ChainSync { | ||||
| 	pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { | ||||
| 		trace!(target: "sync", "== Disconnecting {}", peer); | ||||
| 		if self.peers.contains_key(&peer) { | ||||
| 			info!(target: "sync", "Disconneced {}:{}", peer, io.peer_info(peer)); | ||||
| 			info!(target: "sync", "Disconnected {}:{}", peer, io.peer_info(peer)); | ||||
| 			self.clear_peer_download(peer); | ||||
| 			self.peers.remove(&peer); | ||||
| 			self.continue_sync(io); | ||||
|  | ||||
| @ -48,6 +48,7 @@ pub enum SyncMessage { | ||||
| 	BlockVerified, | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub type NetSyncMessage = NetworkIoMessage<SyncMessage>; | ||||
| 
 | ||||
| /// Ethereum network protocol handler
 | ||||
|  | ||||
| @ -4,8 +4,11 @@ use error::*; | ||||
| use evm::Schedule; | ||||
| 
 | ||||
| #[derive(Debug,Clone)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub enum Action { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Create, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	Call(Address), | ||||
| } | ||||
| 
 | ||||
| @ -13,16 +16,25 @@ pub enum Action { | ||||
| /// or contract creation operation.
 | ||||
| #[derive(Debug,Clone)] | ||||
| pub struct Transaction { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub nonce: U256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub gas_price: U256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub gas: U256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub action: Action, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub value: U256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub data: Bytes, | ||||
| 
 | ||||
| 	// signature
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub v: u8, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub r: U256, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub s: U256, | ||||
| 
 | ||||
| 	hash: RefCell<Option<H256>>, | ||||
| @ -30,6 +42,7 @@ pub struct Transaction { | ||||
| } | ||||
| 
 | ||||
| impl Transaction { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn new() -> Self { | ||||
| 		Transaction { | ||||
| 			nonce: x!(0), | ||||
|  | ||||
							
								
								
									
util/cov.sh: 1 line (new symbolic link)
| @ -0,0 +1 @@ | ||||
| ../cov.sh | ||||
| @ -43,6 +43,7 @@ use std::ops::{Deref, DerefMut}; | ||||
| use uint::{Uint, U128, U256}; | ||||
| use hash::FixedHash; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct PrettySlice<'a> (&'a [u8]); | ||||
| 
 | ||||
| impl<'a> fmt::Debug for PrettySlice<'a> { | ||||
| @ -66,8 +67,11 @@ impl<'a> fmt::Display for PrettySlice<'a> { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub trait ToPretty { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn pretty(&self) -> PrettySlice; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn to_hex(&self) -> String { | ||||
| 		format!("{}", self.pretty()) | ||||
| 	} | ||||
| @ -90,8 +94,11 @@ impl ToPretty for Bytes { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub enum BytesRef<'a> { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	Flexible(&'a mut Bytes), | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	Fixed(&'a mut [u8]) | ||||
| } | ||||
| 
 | ||||
| @ -121,8 +128,11 @@ pub type Bytes = Vec<u8>; | ||||
| /// Slice of bytes to underlying memory
 | ||||
| pub trait BytesConvertable { | ||||
| 	// TODO: rename to as_slice
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn bytes(&self) -> &[u8]; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn as_slice(&self) -> &[u8] { self.bytes() } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn to_bytes(&self) -> Bytes { self.as_slice().to_vec() } | ||||
| } | ||||
| 
 | ||||
| @ -160,8 +170,11 @@ fn bytes_convertable() { | ||||
| ///
 | ||||
| /// TODO: optimise some conversions
 | ||||
| pub trait ToBytes { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn to_bytes(&self) -> Vec<u8>; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn to_bytes_len(&self) -> usize { self.to_bytes().len() } | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn first_byte(&self) -> Option<u8> { self.to_bytes().first().map(|&x| { x })} | ||||
| } | ||||
| 
 | ||||
| @ -257,7 +270,9 @@ impl <T>ToBytes for T where T: FixedHash { | ||||
| /// Error returned when FromBytes conversion goes wrong
 | ||||
| #[derive(Debug, PartialEq, Eq)] | ||||
| pub enum FromBytesError { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	DataIsTooShort, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	DataIsTooLong | ||||
| } | ||||
| 
 | ||||
| @ -278,6 +293,7 @@ pub type FromBytesResult<T> = Result<T, FromBytesError>; | ||||
| ///
 | ||||
| /// TODO: check size of bytes before conversion and return appropriate error
 | ||||
| pub trait FromBytes: Sized { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn from_bytes(bytes: &[u8]) -> FromBytesResult<Self>; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -49,7 +49,9 @@ use sha3::*; | ||||
| /// index. Their `BloomIndex` can be created from block number and given level.
 | ||||
| #[derive(Eq, PartialEq, Hash, Clone, Debug)] | ||||
| pub struct BloomIndex { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub level: u8, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub index: usize, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -46,6 +46,7 @@ macro_rules! flushln { | ||||
| 	($fmt:expr, $($arg:tt)*) => (flush!(concat!($fmt, "\n"), $($arg)*)); | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn flush(s: String) { | ||||
| 	::std::io::stdout().write(s.as_bytes()).unwrap(); | ||||
| 	::std::io::stdout().flush().unwrap(); | ||||
|  | ||||
| @ -4,8 +4,11 @@ use uint::*; | ||||
| use secp256k1::{key, Secp256k1}; | ||||
| use rand::os::OsRng; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub type Secret = H256; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub type Public = H512; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub type Signature = H520; | ||||
| 
 | ||||
| lazy_static! { | ||||
| @ -33,11 +36,17 @@ impl Signature { | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub enum CryptoError { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidSecret, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidPublic, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidSignature, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	InvalidMessage, | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	Io(::std::io::Error), | ||||
| } | ||||
| 
 | ||||
| @ -122,6 +131,7 @@ impl KeyPair { | ||||
| 	pub fn sign(&self, message: &H256) -> Result<Signature, CryptoError> { ec::sign(&self.secret, message) } | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod ec { | ||||
| 	use hash::*; | ||||
| 	use uint::*; | ||||
| @ -151,6 +161,12 @@ pub mod ec { | ||||
| 		let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() }; | ||||
| 		signature.clone_from_slice(&data); | ||||
| 		signature[64] = rec_id.to_i32() as u8; | ||||
| 
 | ||||
| 		let (_, s, v) = signature.to_rsv(); | ||||
| 		let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap(); | ||||
| 		if !is_low_s(&s) { | ||||
| 			signature = super::Signature::from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1); | ||||
| 		} | ||||
| 		Ok(signature) | ||||
| 	} | ||||
| 	/// Verify signature.
 | ||||
| @ -174,7 +190,7 @@ pub mod ec { | ||||
| 
 | ||||
| 	/// Check if this is a "low" signature.
 | ||||
| 	pub fn is_low(sig: &Signature) -> bool { | ||||
| 		H256::from_slice(&sig[32..64]) <= h256_from_hex("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0") | ||||
| 		H256::from_slice(&sig[32..64]) <= h256_from_hex("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0") | ||||
| 	} | ||||
| 
 | ||||
| 	/// Check if this is a "low" signature.
 | ||||
| @ -192,10 +208,12 @@ pub mod ec { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod ecdh { | ||||
| 	use crypto::*; | ||||
| 	use crypto::{self}; | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn agree(secret: &Secret, public: &Public, ) -> Result<Secret, CryptoError> { | ||||
| 		use secp256k1::*; | ||||
| 		let context = &crypto::SECP256K1; | ||||
| @ -211,11 +229,13 @@ pub mod ecdh { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod ecies { | ||||
| 	use hash::*; | ||||
| 	use bytes::*; | ||||
| 	use crypto::*; | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn encrypt(public: &Public, plain: &[u8]) -> Result<Bytes, CryptoError> { | ||||
| 		use ::rcrypto::digest::Digest; | ||||
| 		use ::rcrypto::sha2::Sha256; | ||||
| @ -251,6 +271,7 @@ pub mod ecies { | ||||
| 		Ok(msg) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn decrypt(secret: &Secret, encrypted: &[u8]) -> Result<Bytes, CryptoError> { | ||||
| 		use ::rcrypto::digest::Digest; | ||||
| 		use ::rcrypto::sha2::Sha256; | ||||
| @ -316,17 +337,20 @@ pub mod ecies { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub mod aes { | ||||
| 	use ::rcrypto::blockmodes::*; | ||||
| 	use ::rcrypto::aessafe::*; | ||||
| 	use ::rcrypto::symmetriccipher::*; | ||||
| 	use ::rcrypto::buffer::*; | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) { | ||||
| 		let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); | ||||
| 		encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) { | ||||
| 		let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); | ||||
| 		encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); | ||||
|  | ||||
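The new lines in ec::sign() above enforce canonical "low-s" signatures. For any valid ECDSA signature (r, s, v) over secp256k1, the pair (r, n - s) with the recovery id flipped verifies just as well (n being the curve order, the secp256k1n constant in the diff), so without normalisation the same message admits two encodings and signed payloads become malleable. A condensed sketch of just that step, assumed to live inside util::crypto::ec next to sign() and reusing its own helpers (to_rsv, from_rsv, is_low_s); the U256 component types returned by to_rsv are an assumption.

    /// Sketch: fold a signature into its low-s form (see the change to ec::sign above).
    fn to_low_s(sig: Signature) -> Signature {
    	let (r, s, v) = sig.to_rsv();
    	let n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap();
    	if is_low_s(&s) {
    		sig
    	} else {
    		// mirror s across the curve order and flip the recovery id
    		Signature::from_rsv(&H256::from(r), &H256::from(n - s), v ^ 1)
    	}
    }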
| @ -6,22 +6,36 @@ use rlp::DecoderError; | ||||
| use io; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub enum BaseDataError { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	NegativelyReferencedHash, | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// General error type which should be capable of representing all errors in ethcore.
 | ||||
| pub enum UtilError { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Crypto(::crypto::CryptoError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	StdIo(::std::io::Error), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Io(io::IoError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	AddressParse(::std::net::AddrParseError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	AddressResolve(Option<::std::io::Error>), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	FromHex(FromHexError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	BaseData(BaseDataError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Network(NetworkError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Decoder(DecoderError), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	SimpleString(String), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	BadSize, | ||||
| } | ||||
| 
 | ||||
| @ -73,6 +87,12 @@ impl From<::rlp::DecoderError> for UtilError { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| impl From<String> for UtilError { | ||||
| 	fn from(err: String) -> UtilError { | ||||
| 		UtilError::SimpleString(err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted.
 | ||||
| /*#![feature(concat_idents)]
 | ||||
| macro_rules! assimilate { | ||||
|  | ||||
| @ -7,6 +7,8 @@ macro_rules! xjson { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub trait FromJson { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn from_json(json: &Json) -> Self; | ||||
| } | ||||
|  | ||||
| @ -13,20 +13,33 @@ use uint::{Uint, U256}; | ||||
| ///
 | ||||
| /// Note: types implementing `FixedHash` must be also `BytesConvertable`.
 | ||||
| pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn new() -> Self; | ||||
| 	/// Synonym for `new()`. Prefer to new as it's more readable.
 | ||||
| 	fn zero() -> Self; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn random() -> Self; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn randomize(&mut self); | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	fn size() -> usize; | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	fn from_slice(src: &[u8]) -> Self; | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	fn clone_from_slice(&mut self, src: &[u8]) -> usize; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn copy_to(&self, dest: &mut [u8]); | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn shift_bloomed<'a, T>(&'a mut self, b: &T) -> &'a mut Self where T: FixedHash; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn with_bloomed<T>(mut self, b: &T) -> Self where T: FixedHash { self.shift_bloomed(b); self } | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn bloom_part<T>(&self, m: usize) -> T where T: FixedHash; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn contains_bloomed<T>(&self, b: &T) -> bool where T: FixedHash; | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	fn contains<'a>(&'a self, b: &'a Self) -> bool; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn is_zero(&self) -> bool; | ||||
| } | ||||
| 
 | ||||
| @ -41,6 +54,7 @@ fn clean_0x(s: &str) -> &str { | ||||
| macro_rules! impl_hash { | ||||
| 	($from: ident, $size: expr) => { | ||||
| 		#[derive(Eq)] | ||||
| 		/// TODO [Gav Wood] Please document me
 | ||||
| 		pub struct $from (pub [u8; $size]); | ||||
| 
 | ||||
| 		impl BytesConvertable for $from { | ||||
| @ -396,10 +410,12 @@ macro_rules! impl_hash { | ||||
| 		} | ||||
| 
 | ||||
| 		impl $from { | ||||
| 			/// TODO [Gav Wood] Please document me
 | ||||
| 			pub fn hex(&self) -> String { | ||||
| 				format!("{:?}", self) | ||||
| 			} | ||||
| 
 | ||||
| 			/// TODO [Gav Wood] Please document me
 | ||||
| 			pub fn from_bloomed<T>(b: &T) -> Self where T: FixedHash { b.bloom_part($size) } | ||||
| 		} | ||||
| 
 | ||||
| @ -503,21 +519,25 @@ impl<'_> From<&'_ Address> for H256 { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn h256_from_hex(s: &str) -> H256 { | ||||
| 	use std::str::FromStr; | ||||
| 	H256::from_str(s).unwrap() | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn h256_from_u64(n: u64) -> H256 { | ||||
| 	use uint::U256; | ||||
| 	H256::from(&U256::from(n)) | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn address_from_hex(s: &str) -> Address { | ||||
| 	use std::str::FromStr; | ||||
| 	Address::from_str(s).unwrap() | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn address_from_u64(n: u64) -> Address { | ||||
| 	let h256 = h256_from_u64(n); | ||||
| 	From::from(h256) | ||||
|  | ||||
| @ -41,7 +41,9 @@ mod worker; | ||||
| use mio::{EventLoop, Token}; | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub enum IoError { | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	Mio(::std::io::Error), | ||||
| } | ||||
| 
 | ||||
| @ -73,13 +75,20 @@ pub trait IoHandler<Message>: Send + Sync where Message: Send + Sync + Clone + ' | ||||
| 	fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {} | ||||
| } | ||||
| 
 | ||||
| pub type TimerToken = service::TimerToken; | ||||
| pub type StreamToken = service::StreamToken; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::TimerToken; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::StreamToken; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::IoContext; | ||||
| pub type IoService<Message> = service::IoService<Message>; | ||||
| pub type IoChannel<Message> = service::IoChannel<Message>; | ||||
| pub type IoManager<Message> = service::IoManager<Message>; | ||||
| pub const TOKENS_PER_HANDLER: usize = service::TOKENS_PER_HANDLER; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::IoService; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::IoChannel; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::IoManager; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use io::service::TOKENS_PER_HANDLER; | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|  | ||||
							
								
								
									
util/src/io/worker.rs: 104 lines (new file)
| @ -0,0 +1,104 @@ | ||||
| use std::sync::*; | ||||
| use std::mem; | ||||
| use std::thread::{JoinHandle, self}; | ||||
| use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; | ||||
| use crossbeam::sync::chase_lev; | ||||
| use io::service::{HandlerId, IoChannel, IoContext}; | ||||
| use io::{IoHandler}; | ||||
| 
 | ||||
| pub enum WorkType<Message> { | ||||
| 	Readable, | ||||
| 	Writable, | ||||
| 	Hup, | ||||
| 	Timeout, | ||||
| 	Message(Message) | ||||
| } | ||||
| 
 | ||||
| pub struct Work<Message> { | ||||
| 	pub work_type: WorkType<Message>, | ||||
| 	pub token: usize, | ||||
| 	pub handler_id: HandlerId, | ||||
| 	pub handler: Arc<IoHandler<Message>>, | ||||
| } | ||||
| 
 | ||||
| /// An IO worker thread
 | ||||
| /// Pulls queued work items off the shared deque and dispatches them to their handlers.
 | ||||
| pub struct Worker { | ||||
| 	thread: Option<JoinHandle<()>>, | ||||
| 	wait: Arc<Condvar>, | ||||
| 	deleting: Arc<AtomicBool>, | ||||
| } | ||||
| 
 | ||||
| impl Worker { | ||||
| 	/// Creates a new worker instance.
 | ||||
| 	pub fn new<Message>(index: usize, 
 | ||||
| 						stealer: chase_lev::Stealer<Work<Message>>, 
 | ||||
| 						channel: IoChannel<Message>, | ||||
| 						wait: Arc<Condvar>, | ||||
| 						wait_mutex: Arc<Mutex<bool>>) -> Worker 
 | ||||
| 						where Message: Send + Sync + Clone + 'static { | ||||
| 		let deleting = Arc::new(AtomicBool::new(false)); | ||||
| 		let mut worker = Worker { | ||||
| 			thread: None, | ||||
| 			wait: wait.clone(), | ||||
| 			deleting: deleting.clone(), | ||||
| 		}; | ||||
| 		worker.thread = Some(thread::Builder::new().name(format!("IO Worker #{}", index)).spawn( | ||||
| 			move || Worker::work_loop(stealer, channel.clone(), wait, wait_mutex.clone(), deleting)) | ||||
| 			.expect("Error creating worker thread")); | ||||
| 		worker | ||||
| 	} | ||||
| 
 | ||||
| 	fn work_loop<Message>(stealer: chase_lev::Stealer<Work<Message>>, | ||||
| 						channel: IoChannel<Message>, wait: Arc<Condvar>, 
 | ||||
| 						wait_mutex: Arc<Mutex<bool>>, 
 | ||||
| 						deleting: Arc<AtomicBool>) 
 | ||||
| 						where Message: Send + Sync + Clone + 'static { | ||||
| 		while !deleting.load(AtomicOrdering::Relaxed) { | ||||
| 			{ | ||||
| 				let lock = wait_mutex.lock().unwrap(); | ||||
| 				let _ = wait.wait(lock).unwrap(); | ||||
| 				if deleting.load(AtomicOrdering::Relaxed) { | ||||
| 					return; | ||||
| 				} | ||||
| 			} | ||||
| 			loop { | ||||
| 				match stealer.steal() { | ||||
| 					chase_lev::Steal::Data(work) => { | ||||
| 						Worker::do_work(work, channel.clone()); | ||||
| 					} | ||||
| 					_ => break
 | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	fn do_work<Message>(work: Work<Message>, channel: IoChannel<Message>) where Message: Send + Sync + Clone + 'static { | ||||
| 		match work.work_type { | ||||
| 			WorkType::Readable => { | ||||
| 				work.handler.stream_readable(&mut IoContext::new(channel, work.handler_id), work.token); | ||||
| 			}, | ||||
| 			WorkType::Writable => { | ||||
| 				work.handler.stream_writable(&mut IoContext::new(channel, work.handler_id), work.token); | ||||
| 			} | ||||
| 			WorkType::Hup => { | ||||
| 				work.handler.stream_hup(&mut IoContext::new(channel, work.handler_id), work.token); | ||||
| 			} | ||||
| 			WorkType::Timeout => { | ||||
| 				work.handler.timeout(&mut IoContext::new(channel, work.handler_id), work.token); | ||||
| 			} | ||||
| 			WorkType::Message(message) => { | ||||
| 				work.handler.message(&mut IoContext::new(channel, work.handler_id), &message); | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| impl Drop for Worker { | ||||
| 	fn drop(&mut self) { | ||||
| 		self.deleting.store(true, AtomicOrdering::Relaxed); | ||||
| 		self.wait.notify_all(); | ||||
| 		let thread = mem::replace(&mut self.thread, None).unwrap(); | ||||
| 		thread.join().unwrap(); | ||||
| 	} | ||||
| } | ||||
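util/src/io/worker.rs is new: each Worker parks on a shared Condvar and, once notified, drains items from a chase-lev work-stealing deque until it is empty. The producer side is not part of this diff; the rough sketch below assumes crossbeam's chase_lev::deque() constructor and push() on the owning end (the natural counterparts of the Stealer::steal() used above), and treats the IoChannel and the Work item as already constructed.

    use std::sync::{Arc, Condvar, Mutex};
    use crossbeam::sync::chase_lev;

    // Sketch of the producer side (the IO service), with `channel` and `work` given.
    fn spawn_and_feed<Message>(channel: IoChannel<Message>, work: Work<Message>)
    		where Message: Send + Sync + Clone + 'static {
    	let (owner, stealer) = chase_lev::deque();      // assumed constructor
    	let wait = Arc::new(Condvar::new());
    	let wait_mutex = Arc::new(Mutex::new(false));
    	let _worker = Worker::new(0, stealer, channel, wait.clone(), wait_mutex);

    	owner.push(work);       // enqueue a Work<Message> item (assumed API)
    	wait.notify_one();      // wake a parked worker thread
    }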
							
								
								
									
util/src/journaldb.rs: 214 lines (new file)
| @ -0,0 +1,214 @@ | ||||
| //! Disk-backed HashDB implementation.
 | ||||
| 
 | ||||
| use std::env; | ||||
| use common::*; | ||||
| use rlp::*; | ||||
| use hashdb::*; | ||||
| use overlaydb::*; | ||||
| use rocksdb::{DB, Writable}; | ||||
| 
 | ||||
| #[derive(Clone)] | ||||
| /// Implementation of the HashDB trait for a disk-backed database with a memory overlay
 | ||||
| /// and latent-removal semantics.
 | ||||
| ///
 | ||||
| /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to 
 | ||||
| /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
 | ||||
| /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 | ||||
| /// the removals actually take effect.
 | ||||
| pub struct JournalDB { | ||||
| 	forward: OverlayDB, | ||||
| 	backing: Arc<DB>, | ||||
| 	inserts: Vec<H256>, | ||||
| 	removes: Vec<H256>, | ||||
| } | ||||
| 
 | ||||
| impl JournalDB { | ||||
| 	/// Create a new instance given a `backing` database.
 | ||||
| 	pub fn new(backing: DB) -> JournalDB { | ||||
| 		let db = Arc::new(backing); | ||||
| 		JournalDB { | ||||
| 			forward: OverlayDB::new_with_arc(db.clone()), | ||||
| 			backing: db, | ||||
| 			inserts: vec![], | ||||
| 			removes: vec![], | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// Create a new instance with an anonymous temporary database.
 | ||||
| 	pub fn new_temp() -> JournalDB { | ||||
| 		let mut dir = env::temp_dir(); | ||||
| 		dir.push(H32::random().hex()); | ||||
| 		Self::new(DB::open_default(dir.to_str().unwrap()).unwrap()) | ||||
| 	} | ||||
| 
 | ||||
| 	/// Get a clone of the overlay db portion of this.
 | ||||
| 	pub fn to_overlaydb(&self) -> OverlayDB { self.forward.clone() } | ||||
| 
 | ||||
| 	/// Commit all recent insert operations and historical removals from the old era
 | ||||
| 	/// to the backing database.
 | ||||
| 	pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { | ||||
| 		// journal format: 
 | ||||
| 		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 | ||||
| 		// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
 | ||||
| 		// [era, n] => [ ... ]
 | ||||
| 
 | ||||
| 		// TODO: store last_era, reclaim_period.
 | ||||
| 
 | ||||
| 		// when we make a new commit, we journal the inserts and removes.
 | ||||
| 		// for each end_era that we journaled that we are now passing by, 
 | ||||
| 		// we remove all of its removes assuming it is canonical and all
 | ||||
| 		// of its inserts otherwise.
 | ||||
| 
 | ||||
| 		// record new commit's details.
 | ||||
| 		{ | ||||
| 			let mut index = 0usize; | ||||
| 			let mut last; | ||||
| 
 | ||||
| 			while try!(self.backing.get({ | ||||
| 				let mut r = RlpStream::new_list(2); | ||||
| 				r.append(&now); | ||||
| 				r.append(&index); | ||||
| 				last = r.drain(); | ||||
| 				&last | ||||
| 			})).is_some() { | ||||
| 				index += 1; | ||||
| 			} | ||||
| 
 | ||||
| 			let mut r = RlpStream::new_list(3); | ||||
| 			r.append(id); | ||||
| 			r.append(&self.inserts); | ||||
| 			r.append(&self.removes); | ||||
| 			try!(self.backing.put(&last, r.as_raw())); | ||||
| 			self.inserts.clear(); | ||||
| 			self.removes.clear(); | ||||
| 		} | ||||
| 
 | ||||
| 		// apply old commits' details
 | ||||
| 		if let Some((end_era, canon_id)) = end { | ||||
| 			let mut index = 0usize; | ||||
| 			let mut last; | ||||
| 			while let Some(rlp_data) = try!(self.backing.get({ | ||||
| 				let mut r = RlpStream::new_list(2); | ||||
| 				r.append(&end_era); | ||||
| 				r.append(&index); | ||||
| 				last = r.drain(); | ||||
| 				&last | ||||
| 			})) { | ||||
| 				let rlp = Rlp::new(&rlp_data); | ||||
| 				let to_remove: Vec<H256> = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); | ||||
| 				for i in to_remove.iter() { | ||||
| 					self.forward.remove(i); | ||||
| 				} | ||||
| 				try!(self.backing.delete(&last)); | ||||
| 				trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); | ||||
| 				index += 1; | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		self.forward.commit() | ||||
| 	} | ||||
| 
 | ||||
| 	/// Revert all operations on this object (i.e. `insert()`s and `removes()`s) since the
 | ||||
| 	/// last `commit()`.
 | ||||
| 	pub fn revert(&mut self) { self.forward.revert(); self.removes.clear(); } | ||||
| } | ||||
| 
 | ||||
| impl HashDB for JournalDB { | ||||
| 	fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } | ||||
| 	fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } | ||||
| 	fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } | ||||
| 	fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } | ||||
| 	fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } | ||||
| 	fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
| 	use common::*; | ||||
| 	use super::*; | ||||
| 	use hashdb::*; | ||||
| 
 | ||||
| 	#[test] | ||||
| 	fn long_history() { | ||||
| 		// history is 3
 | ||||
| 		let mut jdb = JournalDB::new_temp(); | ||||
| 		let h = jdb.insert(b"foo"); | ||||
| 		jdb.commit(0, &b"0".sha3(), None).unwrap(); | ||||
| 		assert!(jdb.exists(&h)); | ||||
| 		jdb.remove(&h); | ||||
| 		jdb.commit(1, &b"1".sha3(), None).unwrap(); | ||||
| 		assert!(jdb.exists(&h)); | ||||
| 		jdb.commit(2, &b"2".sha3(), None).unwrap(); | ||||
| 		assert!(jdb.exists(&h)); | ||||
| 		jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); | ||||
| 		assert!(jdb.exists(&h)); | ||||
| 		jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); | ||||
| 		assert!(!jdb.exists(&h)); | ||||
| 	} | ||||
| 
 | ||||
| 	#[test] | ||||
| 	fn complex() { | ||||
| 		// history is 1
 | ||||
| 		let mut jdb = JournalDB::new_temp(); | ||||
| 
 | ||||
| 		let foo = jdb.insert(b"foo"); | ||||
| 		let bar = jdb.insert(b"bar"); | ||||
| 		jdb.commit(0, &b"0".sha3(), None).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(jdb.exists(&bar)); | ||||
| 
 | ||||
| 		jdb.remove(&foo); | ||||
| 		jdb.remove(&bar); | ||||
| 		let baz = jdb.insert(b"baz"); | ||||
| 		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(jdb.exists(&bar)); | ||||
| 		assert!(jdb.exists(&baz)); | ||||
| 
 | ||||
| 		let foo = jdb.insert(b"foo"); | ||||
| 		jdb.remove(&baz); | ||||
| 		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(!jdb.exists(&bar)); | ||||
| 		assert!(jdb.exists(&baz)); | ||||
| 
 | ||||
| 		jdb.remove(&foo); | ||||
| 		jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(!jdb.exists(&bar)); | ||||
| 		assert!(!jdb.exists(&baz)); | ||||
| 
 | ||||
| 		jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); | ||||
| 		assert!(!jdb.exists(&foo)); | ||||
| 		assert!(!jdb.exists(&bar)); | ||||
| 		assert!(!jdb.exists(&baz)); | ||||
| 	} | ||||
| 
 | ||||
| 	#[test] | ||||
| 	fn fork() { | ||||
| 		// history is 1
 | ||||
| 		let mut jdb = JournalDB::new_temp(); | ||||
| 
 | ||||
| 		let foo = jdb.insert(b"foo"); | ||||
| 		let bar = jdb.insert(b"bar"); | ||||
| 		jdb.commit(0, &b"0".sha3(), None).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(jdb.exists(&bar)); | ||||
| 
 | ||||
| 		jdb.remove(&foo); | ||||
| 		let baz = jdb.insert(b"baz"); | ||||
| 		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); | ||||
| 
 | ||||
| 		jdb.remove(&bar); | ||||
| 		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); | ||||
| 
 | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(jdb.exists(&bar)); | ||||
| 		assert!(jdb.exists(&baz)); | ||||
| 
 | ||||
| 		jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); | ||||
| 		assert!(jdb.exists(&foo)); | ||||
| 		assert!(!jdb.exists(&baz)); | ||||
| 		assert!(!jdb.exists(&bar)); | ||||
| 	} | ||||
| } | ||||
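The journal layout described in JournalDB::commit() keys each record by (era, index) and stores the block id plus the hashes inserted and removed under that block. A small sketch of how one such record is written, using only the RlpStream and rocksdb calls that commit() itself uses; the era, index and id values are illustrative and the inserts/removes are taken as given.

    // Sketch: write one journal record, mirroring the key/value layout in commit()
    // (`backing` is the rocksdb handle, as in JournalDB).
    fn journal_record(backing: &DB, inserts: &Vec<H256>, removes: &Vec<H256>) {
    	// key: RLP list [era, index]
    	let mut key = RlpStream::new_list(2);
    	key.append(&5u64);       // era (illustrative)
    	key.append(&0usize);     // first record journaled for that era
    	let key_bytes = key.drain();

    	// value: RLP list [id, inserts, removes]
    	let mut record = RlpStream::new_list(3);
    	record.append(&b"block-id".sha3());   // the id later compared against canon_id
    	record.append(inserts);
    	record.append(removes);
    	backing.put(&key_bytes, record.as_raw()).unwrap();
    }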
| @ -1,5 +1,6 @@ | ||||
| use common::*; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub fn clean(s: &str) -> &str { | ||||
| 	if s.len() >= 2 && &s[0..2] == "0x" { | ||||
| 		&s[2..] | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| #![warn(missing_docs)] | ||||
| #![feature(op_assign_traits)] | ||||
| #![feature(augmented_assignments)] | ||||
| #![feature(associated_consts)] | ||||
| #![feature(wrapping)] | ||||
| //! Ethcore-util library
 | ||||
| //!
 | ||||
| //! ### Rust version:
 | ||||
| @ -53,33 +53,46 @@ extern crate arrayvec; | ||||
| extern crate elastic_array; | ||||
| extern crate crossbeam; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod standard; | ||||
| #[macro_use] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod from_json; | ||||
| #[macro_use] | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod common; | ||||
| pub mod error; | ||||
| pub mod hash; | ||||
| pub mod uint; | ||||
| pub mod bytes; | ||||
| pub mod rlp; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod misc; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod json_aid; | ||||
| pub mod vector; | ||||
| pub mod sha3; | ||||
| pub mod hashdb; | ||||
| pub mod memorydb; | ||||
| pub mod overlaydb; | ||||
| pub mod journaldb; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod math; | ||||
| pub mod chainfilter; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod crypto; | ||||
| pub mod triehash; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod trie; | ||||
| pub mod nibbleslice; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod heapsizeof; | ||||
| pub mod squeeze; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod semantic_version; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod io; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod network; | ||||
| 
 | ||||
| pub use common::*; | ||||
| @ -89,6 +102,7 @@ pub use rlp::*; | ||||
| pub use hashdb::*; | ||||
| pub use memorydb::*; | ||||
| pub use overlaydb::*; | ||||
| pub use journaldb::*; | ||||
| pub use math::*; | ||||
| pub use chainfilter::*; | ||||
| pub use crypto::*; | ||||
|  | ||||
| @ -107,12 +107,14 @@ impl MemoryDB { | ||||
| 		self.data.get(key) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn drain(&mut self) -> HashMap<H256, (Bytes, i32)> { | ||||
| 		let mut data = HashMap::new(); | ||||
| 		mem::swap(&mut self.data, &mut data); | ||||
| 		data | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { | ||||
| 		if self.raw(key) == None { | ||||
| 			unsafe { | ||||
|  | ||||
| @ -3,9 +3,13 @@ use common::*; | ||||
| #[derive(Debug,Clone,PartialEq,Eq)] | ||||
| /// Diff type for specifying a change (or not).
 | ||||
| pub enum Diff<T> where T: Eq { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Same, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Born(T), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Changed(T, T), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Died(T), | ||||
| } | ||||
| 
 | ||||
| @ -26,6 +30,8 @@ impl<T> Diff<T> where T: Eq { | ||||
| #[derive(PartialEq,Eq,Clone,Copy)] | ||||
| /// Boolean type for clean/dirty status.
 | ||||
| pub enum Filth { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Clean, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Dirty, | ||||
| } | ||||
|  | ||||
| @ -56,12 +56,19 @@ mod service; | ||||
| mod error; | ||||
| mod node; | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::host::PeerId; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::host::PacketId; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::host::NetworkContext; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::service::NetworkService; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::host::NetworkIoMessage; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::host::NetworkIoMessage::User as UserMessage; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub use network::error::NetworkError; | ||||
| 
 | ||||
| use io::TimerToken; | ||||
|  | ||||
| @ -34,6 +34,7 @@ pub struct NibbleSlice<'a> { | ||||
| 	offset_encode_suffix: usize, | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub struct NibbleSliceIterator<'a> { | ||||
| 	p: &'a NibbleSlice<'a>, | ||||
| 	i: usize, | ||||
| @ -76,6 +77,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { | ||||
| 		(r, a.len() + b.len()) | ||||
| 	}*/ | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn iter(&'a self) -> NibbleSliceIterator<'a> { | ||||
| 		NibbleSliceIterator { p: self, i: 0 } | ||||
| 	} | ||||
| @ -130,6 +132,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { | ||||
| 		i | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn encoded(&self, is_leaf: bool) -> Bytes { | ||||
| 		let l = self.len(); | ||||
| 		let mut r = Bytes::with_capacity(l / 2 + 1); | ||||
| @ -142,6 +145,7 @@ impl<'a, 'view> NibbleSlice<'a> where 'a: 'view { | ||||
| 		r | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub fn encoded_leftmost(&self, n: usize, is_leaf: bool) -> Bytes { | ||||
| 		let l = min(self.len(), n); | ||||
| 		let mut r = Bytes::with_capacity(l / 2 + 1); | ||||
|  | ||||
| @ -15,11 +15,11 @@ use rocksdb::{DB, Writable, IteratorMode}; | ||||
| #[derive(Clone)] | ||||
| /// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
 | ||||
| ///
 | ||||
| /// The operations `insert()` and `kill()` take place on the memory overlay; batches of
 | ||||
| /// The operations `insert()` and `remove()` take place on the memory overlay; batches of
 | ||||
| /// such operations may be flushed to the disk-backed DB with `commit()` or discarded with
 | ||||
| /// `revert()`.
 | ||||
| ///
 | ||||
| /// `lookup()` and `exists()` maintain normal behaviour - all `insert()` and `kill()` 
 | ||||
| /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` 
 | ||||
| /// queries have an immediate effect in terms of these functions.
 | ||||
| pub struct OverlayDB { | ||||
| 	overlay: MemoryDB, | ||||
| @ -28,8 +28,11 @@ pub struct OverlayDB { | ||||
| 
 | ||||
| impl OverlayDB { | ||||
| 	/// Create a new instance of OverlayDB given a `backing` database.
 | ||||
| 	pub fn new(backing: DB) -> OverlayDB { | ||||
| 		OverlayDB{ overlay: MemoryDB::new(), backing: Arc::new(backing) } | ||||
| 	pub fn new(backing: DB) -> OverlayDB { Self::new_with_arc(Arc::new(backing)) } | ||||
| 
 | ||||
| 	/// Create a new instance of OverlayDB given a `backing` database.
 | ||||
| 	pub fn new_with_arc(backing: Arc<DB>) -> OverlayDB { | ||||
| 		OverlayDB{ overlay: MemoryDB::new(), backing: backing } | ||||
| 	} | ||||
| 
 | ||||
| 	/// Create a new instance of OverlayDB with an anonymous temporary database.
 | ||||
| @ -68,11 +71,10 @@ impl OverlayDB { | ||||
| 	/// ```
 | ||||
| 	pub fn commit(&mut self) -> Result<u32, UtilError> { | ||||
| 		let mut ret = 0u32; | ||||
| 		let mut deletes = 0usize; | ||||
| 		for i in self.overlay.drain().into_iter() { | ||||
| 			let (key, (value, rc)) = i; | ||||
| 			// until we figure out state trie pruning, only commit stuff when it has a strictly positive delkta of RCs - 
 | ||||
| 			// this prevents RCs being reduced to 0 where the DB would pretent that the node had been removed.
 | ||||
| 			if rc > 0 { | ||||
| 			if rc != 0 { | ||||
| 				match self.payload(&key) { | ||||
| 					Some(x) => { | ||||
| 						let (back_value, back_rc) = x; | ||||
| @ -80,7 +82,7 @@ impl OverlayDB { | ||||
| 						if total_rc < 0 { | ||||
| 							return Err(From::from(BaseDataError::NegativelyReferencedHash)); | ||||
| 						} | ||||
| 						self.put_payload(&key, (back_value, total_rc as u32)); | ||||
| 						deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; | ||||
| 					} | ||||
| 					None => { | ||||
| 						if rc < 0 { | ||||
| @ -92,6 +94,7 @@ impl OverlayDB { | ||||
| 				ret += 1; | ||||
| 			} | ||||
| 		} | ||||
| 		trace!("OverlayDB::commit() deleted {} nodes", deletes); | ||||
| 		Ok(ret) | ||||
| 	} | ||||
| 
 | ||||
| @ -129,11 +132,18 @@ impl OverlayDB { | ||||
| 	} | ||||
| 
 | ||||
| 	/// Put the refs and value of the given key, deleting the backing entry when the refcount drops to zero.
 | ||||
| 	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) { | ||||
| 		let mut s = RlpStream::new_list(2); | ||||
| 		s.append(&payload.1); | ||||
| 		s.append(&payload.0); | ||||
| 		self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?"); | ||||
| 	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool { | ||||
| 		if payload.1 > 0 { | ||||
| 			let mut s = RlpStream::new_list(2); | ||||
| 			s.append(&payload.1); | ||||
| 			s.append(&payload.0); | ||||
| 			self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); | ||||
| 			false | ||||
| 		} else { | ||||
| 			self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); | ||||
| 			true | ||||
| 		} | ||||
| 
 | ||||
| 	} | ||||
| } | ||||
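With `put_payload` now reporting whether it deleted the backing entry, `commit()` can count how many nodes were physically removed. A rough sketch of the path that triggers such a delete, assuming the `HashDB` surface named in the doc comment above (`insert`, `remove`, `contains`):

    // Assumed signatures: insert(&[u8]) -> H256, remove(&H256), contains(&H256) -> bool.
    let mut db = OverlayDB::new_temp();
    let key = db.insert(b"dog");   // +1 reference in the overlay
    db.commit().unwrap();          // payload lands in the backing DB with rc = 1
    db.remove(&key);               // -1 reference in the overlay
    db.commit().unwrap();          // back_rc + rc == 0, so put_payload deletes and `deletes` is bumped
    assert!(!db.contains(&key));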
| 
 | ||||
|  | ||||
| @ -30,10 +30,15 @@ | ||||
| //! * You want to get view onto rlp-slice.
 | ||||
| //! * You don't want to decode whole rlp at once.
 | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod rlptraits; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod rlperrors; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod rlpin; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod untrusted_rlp; | ||||
| /// TODO [debris] Please document me
 | ||||
| pub mod rlpstream; | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| @ -46,9 +51,13 @@ pub use self::rlpin::{Rlp, RlpIterator}; | ||||
| pub use self::rlpstream::{RlpStream,RlpStandard}; | ||||
| use super::hash::H256; | ||||
| 
 | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub const NULL_RLP: [u8; 1] = [0x80; 1]; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; | ||||
| /// TODO [arkpar] Please document me
 | ||||
| pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); | ||||
| /// TODO [debris] Please document me
 | ||||
| pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); | ||||
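The four constants are related pairwise: each hash is the SHA3 (Keccak-256) of the one-byte encoding above it. A quick check, assuming `sha3()` is available on `Vec<u8>` the way `rlp_sha3()` below uses it:

    // NULL_RLP (0x80) encodes the empty string, EMPTY_LIST_RLP (0xC0) the empty list;
    // hashing them yields the two SHA3_* constants.
    assert_eq!(NULL_RLP.to_vec().sha3(), SHA3_NULL_RLP);
    assert_eq!(EMPTY_LIST_RLP.to_vec().sha3(), SHA3_EMPTY_LIST_RLP);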
| 
 | ||||
| /// Shortcut function to decode trusted rlp
 | ||||
|  | ||||
| @ -3,14 +3,23 @@ use std::error::Error as StdError; | ||||
| use bytes::FromBytesError; | ||||
| 
 | ||||
| #[derive(Debug, PartialEq, Eq)] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub enum DecoderError { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	FromBytesError(FromBytesError), | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	RlpIsTooShort, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	RlpExpectedToBeList, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	RlpExpectedToBeData, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	RlpIncorrectListLen, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	RlpDataLenWithZeroPrefix, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	RlpListLenWithZeroPrefix, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	RlpInvalidIndirection, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -103,10 +103,12 @@ impl <'a, 'view> Rlp<'a> where 'a: 'view { | ||||
| 		res.unwrap_or_else(|_| panic!()) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub fn as_val<T>(&self) -> T where T: Decodable { | ||||
| 		Self::view_as_val(self) | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub fn val_at<T>(&self, index: usize) -> T where T: Decodable { | ||||
| 		Self::view_as_val(&self.at(index)) | ||||
| 	} | ||||
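A short usage sketch for the trusted reader: unlike `UntrustedRlp`, these accessors panic instead of returning a `Result`, so they only suit data already known to be well-formed. It assumes `Decodable` impls for `u64` and `String`, as provided elsewhere in the crate:

    let encoded = {
        let mut s = RlpStream::new_list(2);
        s.append(&15u64);
        s.append(&"cat");
        s.out()
    };
    let rlp = Rlp::new(&encoded);
    let n: u64 = rlp.val_at(0);        // decodes the first list item
    let name: String = rlp.val_at(1);  // panics if the item cannot be decoded as a String
    assert_eq!(n, 15);
    assert_eq!(name, "cat");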
|  | ||||
| @ -142,6 +142,14 @@ impl RlpStream { | ||||
| 			self.note_appended(1); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	/// Drain the object and return the underlying ElasticArray.
 | ||||
| 	pub fn drain(self) -> ElasticArray1024<u8> { | ||||
| 		match self.is_finished() { | ||||
| 			true => self.encoder.bytes, | ||||
| 			false => panic!() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
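`drain()` consumes a finished stream and hands back the raw `ElasticArray1024<u8>` buffer; per the match above, it panics if the declared list is not yet complete. A minimal sketch:

    let mut s = RlpStream::new_list(2);
    s.append(&"cat");
    s.append(&"dog");
    assert!(s.is_finished());  // both declared items have been appended
    let raw = s.drain();       // ElasticArray1024<u8> holding the encoded list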
| 
 | ||||
| struct BasicEncoder { | ||||
| @ -215,15 +223,19 @@ impl Encoder for BasicEncoder { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub trait RlpStandard { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn rlp_append(&self, s: &mut RlpStream); | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn rlp_bytes(&self) -> Bytes { | ||||
| 		let mut s = RlpStream::new(); | ||||
| 		self.rlp_append(&mut s); | ||||
| 		s.out() | ||||
| 	} | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn rlp_sha3(&self) -> H256 { self.rlp_bytes().sha3() } | ||||
| } | ||||
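Only `rlp_append` has to be written by an implementor; `rlp_bytes()` and `rlp_sha3()` fall out of the default methods. A sketch with a hypothetical single-field type, assuming `String` is `Encodable`:

    struct Tag(String);  // hypothetical example type, not part of the crate

    impl RlpStandard for Tag {
        fn rlp_append(&self, s: &mut RlpStream) {
            s.append(&self.0);
        }
    }

    let t = Tag("hello".to_string());
    let bytes = t.rlp_bytes();  // default method: fresh stream, rlp_append, out()
    let hash = t.rlp_sha3();    // keccak of those bytes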
| 
 | ||||
|  | ||||
| @ -1,23 +1,36 @@ | ||||
| use rlp::{DecoderError, UntrustedRlp}; | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Decoder: Sized { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn read_value<T, F>(&self, f: F) -> Result<T, DecoderError> | ||||
| 		where F: FnOnce(&[u8]) -> Result<T, DecoderError>; | ||||
| 
 | ||||
| 	/// TODO [arkpar] Please document me
 | ||||
| 	fn as_list(&self) -> Result<Vec<Self>, DecoderError>; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn as_rlp<'a>(&'a self) -> &'a UntrustedRlp<'a>; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn as_raw(&self) -> &[u8]; | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Decodable: Sized { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn decode<D>(decoder: &D) -> Result<Self, DecoderError>  where D: Decoder; | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait View<'a, 'view>: Sized { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	type Prototype; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	type PayloadInfo; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	type Data; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	type Item; | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	type Iter; | ||||
| 
 | ||||
| 	/// Creates a new instance of `Rlp` reader
 | ||||
| @ -41,8 +54,10 @@ pub trait View<'a, 'view>: Sized { | ||||
| 	/// Get the prototype of the RLP.
 | ||||
| 	fn prototype(&self) -> Self::Prototype; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn payload_info(&self) -> Self::PayloadInfo; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn data(&'view self) -> Self::Data; | ||||
| 
 | ||||
| 	/// Returns number of RLP items.
 | ||||
| @ -179,21 +194,30 @@ pub trait View<'a, 'view>: Sized { | ||||
| 	/// ```
 | ||||
| 	fn iter(&'view self) -> Self::Iter; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn as_val<T>(&self) -> Result<T, DecoderError> where T: Decodable; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn val_at<T>(&self, index: usize) -> Result<T, DecoderError> where T: Decodable; | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Encoder { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn emit_value(&mut self, bytes: &[u8]) -> (); | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn emit_list<F>(&mut self, f: F) -> () where F: FnOnce(&mut Self) -> (); | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn emit_raw(&mut self, bytes: &[u8]) -> (); | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Encodable { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn encode<E>(&self, encoder: &mut E) -> () where E: Encoder; | ||||
| } | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait Stream: Sized { | ||||
| 
 | ||||
| 	/// Initializes instance of empty `Stream`.
 | ||||
| @ -284,6 +308,7 @@ pub trait Stream: Sized { | ||||
| 	/// }
 | ||||
| 	fn is_finished(&self) -> bool; | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn as_raw(&self) -> &[u8]; | ||||
| 
 | ||||
| 	/// Streams out encoded bytes.
 | ||||
|  | ||||
| @ -21,15 +21,21 @@ impl OffsetCache { | ||||
| } | ||||
| 
 | ||||
| #[derive(Debug)] | ||||
| /// TODO [debris] Please document me
 | ||||
| pub enum Prototype { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	Null, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	Data(usize), | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	List(usize), | ||||
| } | ||||
| 
 | ||||
| /// Stores basic information about item
 | ||||
| pub struct PayloadInfo { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub header_len: usize, | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	pub value_len: usize, | ||||
| } | ||||
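`Prototype` classifies an item before it is decoded, while `PayloadInfo` splits the raw encoding into header and payload lengths. A sketch of how a caller might use them; reading the `usize` payloads as data length and item count is an assumption based on context, not documented here:

    fn describe(proto: Prototype) -> String {
        match proto {
            Prototype::Null => "null item".to_string(),
            Prototype::Data(len) => format!("data item, {} byte payload", len),
            Prototype::List(len) => format!("list of {} items", len),
        }
    }

    // Total size of an encoded item is its header plus its payload.
    fn total_len(info: &PayloadInfo) -> usize {
        info.header_len + info.value_len
    }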
| 
 | ||||
|  | ||||
| @ -6,6 +6,7 @@ use bytes::{BytesConvertable, Populatable}; | ||||
| use hash::{H256, FixedHash}; | ||||
| use self::sha3_ext::*; | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); | ||||
| 
 | ||||
| 
 | ||||
|  | ||||
| @ -36,6 +36,7 @@ use heapsize::HeapSizeOf; | ||||
| 
 | ||||
| /// Should be used to squeeze collections to certain size in bytes
 | ||||
| pub trait Squeeze { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn squeeze(&mut self, size: usize); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1,10 +1,17 @@ | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod trietraits; | ||||
| pub mod standardmap; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod journal; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod node; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod triedb; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod triedbmut; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod sectriedb; | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub mod sectriedbmut; | ||||
| 
 | ||||
| pub use self::trietraits::*; | ||||
|  | ||||
| @ -7,9 +7,13 @@ use super::journal::*; | ||||
| /// Type of node in the trie and essential information thereof.
 | ||||
| #[derive(Clone, Eq, PartialEq, Debug)] | ||||
| pub enum Node<'a> { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Empty, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Leaf(NibbleSlice<'a>, &'a[u8]), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Extension(NibbleSlice<'a>, &'a[u8]), | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Branch([&'a[u8]; 16], Option<&'a [u8]>) | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -7,9 +7,13 @@ use hash::*; | ||||
| 
 | ||||
| /// Alphabet to use when creating words for insertion into tries.
 | ||||
| pub enum Alphabet { | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	All, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Low, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Mid, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	Custom(Bytes), | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -34,6 +34,7 @@ use super::node::*; | ||||
| pub struct TrieDB<'db> { | ||||
| 	db: &'db HashDB, | ||||
| 	root: &'db H256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub hash_count: usize, | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -40,6 +40,7 @@ use super::trietraits::*; | ||||
| pub struct TrieDBMut<'db> { | ||||
| 	db: &'db mut HashDB, | ||||
| 	root: &'db mut H256, | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	pub hash_count: usize, | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
245  util/src/uint.rs
							| @ -23,7 +23,6 @@ | ||||
| 
 | ||||
| use standard::*; | ||||
| use from_json::*; | ||||
| use std::num::wrapping::OverflowingOps; | ||||
| 
 | ||||
| macro_rules! impl_map_from { | ||||
| 	($thing:ident, $from:ty, $to:ty) => { | ||||
| @ -59,15 +58,20 @@ macro_rules! panic_on_overflow { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub trait Uint: Sized + Default + FromStr + From<u64> + FromJson + fmt::Debug + fmt::Display + PartialOrd + Ord + PartialEq + Eq + Hash { | ||||
| 
 | ||||
| 	/// Size of this type.
 | ||||
| 	const SIZE: usize; | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn zero() -> Self; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn one() -> Self; | ||||
| 
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	type FromDecStrErr; | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn from_dec_str(value: &str) -> Result<Self, Self::FromDecStrErr>; | ||||
| 
 | ||||
| 	/// Conversion to u32
 | ||||
| @ -97,6 +101,28 @@ pub trait Uint: Sized + Default + FromStr + From<u64> + FromJson + fmt::Debug + | ||||
| 	fn pow(self, other: Self) -> Self; | ||||
| 	/// Return wrapping exponentiation `self**other` and a flag indicating whether an overflow occurred. | ||||
| 	fn overflowing_pow(self, other: Self) -> (Self, bool); | ||||
| 
 | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_add(self, other: Self) -> (Self, bool); | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_sub(self, other: Self) -> (Self, bool); | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_mul(self, other: Self) -> (Self, bool); | ||||
| 	
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_div(self, other: Self) -> (Self, bool); | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_rem(self, other: Self) -> (Self, bool); | ||||
| 
 | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn overflowing_neg(self) -> (Self, bool); | ||||
| 	
 | ||||
| 	/// TODO [Gav Wood] Please document me
 | ||||
| 	fn overflowing_shl(self, shift: u32) -> (Self, bool); | ||||
| } | ||||
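Moving the overflowing operations into the `Uint` trait (instead of relying on the unstable `std::num::wrapping::OverflowingOps`, whose import is removed above) keeps the crate buildable as that nightly API changes. A quick sketch of the intended wrap-around semantics:

    let max = !U256::zero();  // all 256 bits set
    let (sum, overflowed) = max.overflowing_add(U256::one());
    assert_eq!(sum, U256::zero());  // wraps back around to zero
    assert!(overflowed);

    let (diff, underflowed) = U256::zero().overflowing_sub(U256::one());
    assert_eq!(diff, max);          // two's-complement wrap
    assert!(underflowed);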
| 
 | ||||
| macro_rules! construct_uint { | ||||
| @ -259,6 +285,98 @@ macro_rules! construct_uint { | ||||
| 				let res = overflowing!(x.overflowing_mul(y), overflow); | ||||
| 				(res, overflow) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_add(self, other: $name) -> ($name, bool) { | ||||
| 				let $name(ref me) = self; | ||||
| 				let $name(ref you) = other; | ||||
| 				let mut ret = [0u64; $n_words]; | ||||
| 				let mut carry = [0u64; $n_words]; | ||||
| 				let mut b_carry = false; | ||||
| 				let mut overflow = false; | ||||
| 
 | ||||
| 				for i in 0..$n_words { | ||||
| 					ret[i] = me[i].wrapping_add(you[i]); | ||||
| 
 | ||||
| 					if ret[i] < me[i] { | ||||
| 						if i < $n_words - 1 { | ||||
| 							carry[i + 1] = 1; | ||||
| 							b_carry = true; | ||||
| 						} else { | ||||
| 							overflow = true; | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 				if b_carry { | ||||
| 					let ret = overflowing!($name(ret).overflowing_add($name(carry)), overflow); | ||||
| 					(ret, overflow) | ||||
| 				} else { 
 | ||||
| 					($name(ret), overflow) | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_sub(self, other: $name) -> ($name, bool) { | ||||
| 				let res = overflowing!((!other).overflowing_add(From::from(1u64))); | ||||
| 				let res = overflowing!(self.overflowing_add(res)); | ||||
| 				(res, self < other) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_mul(self, other: $name) -> ($name, bool) { | ||||
| 				let mut res = $name::from(0u64); | ||||
| 				let mut overflow = false; | ||||
| 				// TODO: be more efficient about this
 | ||||
| 				for i in 0..(2 * $n_words) { | ||||
| 					let v = overflowing!(self.overflowing_mul_u32((other >> (32 * i)).low_u32()), overflow); | ||||
| 					let res2 = overflowing!(v.overflowing_shl(32 * i as u32), overflow); | ||||
| 					res = overflowing!(res.overflowing_add(res2), overflow); | ||||
| 				} | ||||
| 				(res, overflow) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_div(self, other: $name) -> ($name, bool) { | ||||
| 				(self / other, false) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_rem(self, other: $name) -> ($name, bool) { | ||||
| 				(self % other, false) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_neg(self) -> ($name, bool) { | ||||
| 				(!self, true) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_shl(self, shift32: u32) -> ($name, bool) { | ||||
| 				let $name(ref original) = self; | ||||
| 				let mut ret = [0u64; $n_words]; | ||||
| 				let shift = shift32 as usize; | ||||
| 				let word_shift = shift / 64; | ||||
| 				let bit_shift = shift % 64; | ||||
| 				for i in 0..$n_words { | ||||
| 					// Shift
 | ||||
| 					if i + word_shift < $n_words { | ||||
| 						ret[i + word_shift] += original[i] << bit_shift; | ||||
| 					} | ||||
| 					// Carry
 | ||||
| 					if bit_shift > 0 && i + word_shift + 1 < $n_words { | ||||
| 						ret[i + word_shift + 1] += original[i] >> (64 - bit_shift); | ||||
| 					} | ||||
| 				} | ||||
| 				// Detecting overflow
 | ||||
| 				let last = $n_words - word_shift - if bit_shift > 0 { 1 } else { 0 }; | ||||
| 				let overflow = if bit_shift > 0 { | ||||
| 					(original[last] >> (64 - bit_shift)) > 0 | ||||
| 				} else if word_shift > 0 { | ||||
| 					original[last] > 0 | ||||
| 				} else { | ||||
| 					false | ||||
| 				}; | ||||
| 
 | ||||
| 				for i in last+1..$n_words-1 { | ||||
| 					if original[i] > 0 { | ||||
| 						return ($name(ret), true); | ||||
| 					} | ||||
| 				} | ||||
| 				($name(ret), overflow) | ||||
| 			} | ||||
| 		} | ||||
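The shift is split into whole 64-bit words (`word_shift`) and a remainder (`bit_shift`); overflow is flagged whenever a set bit would be pushed out of the top word. A worked sketch of the expected behaviour (words are stored least-significant first, as the `ONE_U256` constant later in this file shows):

    // shift = 200: word_shift = 3, bit_shift = 8, so bit 0 of `one` lands at bit 200.
    let (x, overflow) = U256::one().overflowing_shl(200);
    assert_eq!(x, U256([0, 0, 0, 0x100]));
    assert!(!overflow);

    // Shifting the top bit out wraps to zero and sets the flag.
    let top_bit = U256([0, 0, 0, 0x8000_0000_0000_0000]);
    let (y, overflow) = top_bit.overflowing_shl(1);
    assert_eq!(y, U256::zero());
    assert!(overflow);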
| 
 | ||||
| 		impl $name { | ||||
| @ -390,105 +508,6 @@ macro_rules! construct_uint { | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		impl OverflowingOps for $name { | ||||
| 			fn overflowing_add(self, other: $name) -> ($name, bool) { | ||||
| 				let $name(ref me) = self; | ||||
| 				let $name(ref you) = other; | ||||
| 				let mut ret = [0u64; $n_words]; | ||||
| 				let mut carry = [0u64; $n_words]; | ||||
| 				let mut b_carry = false; | ||||
| 				let mut overflow = false; | ||||
| 
 | ||||
| 				for i in 0..$n_words { | ||||
| 					ret[i] = me[i].wrapping_add(you[i]); | ||||
| 
 | ||||
| 					if ret[i] < me[i] { | ||||
| 						if i < $n_words - 1 { | ||||
| 							carry[i + 1] = 1; | ||||
| 							b_carry = true; | ||||
| 						} else { | ||||
| 							overflow = true; | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 				if b_carry { | ||||
| 					let ret = overflowing!($name(ret).overflowing_add($name(carry)), overflow); | ||||
| 					(ret, overflow) | ||||
| 				} else { 
 | ||||
| 					($name(ret), overflow) | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_sub(self, other: $name) -> ($name, bool) { | ||||
| 				let res = overflowing!((!other).overflowing_add(From::from(1u64))); | ||||
| 				let res = overflowing!(self.overflowing_add(res)); | ||||
| 				(res, self < other) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_mul(self, other: $name) -> ($name, bool) { | ||||
| 				let mut res = $name::from(0u64); | ||||
| 				let mut overflow = false; | ||||
| 				// TODO: be more efficient about this
 | ||||
| 				for i in 0..(2 * $n_words) { | ||||
| 					let v = overflowing!(self.overflowing_mul_u32((other >> (32 * i)).low_u32()), overflow); | ||||
| 					let res2 = overflowing!(v.overflowing_shl(32 * i as u32), overflow); | ||||
| 					res = overflowing!(res.overflowing_add(res2), overflow); | ||||
| 				} | ||||
| 				(res, overflow) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_div(self, other: $name) -> ($name, bool) { | ||||
| 				(self / other, false) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_rem(self, other: $name) -> ($name, bool) { | ||||
| 				(self % other, false) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_neg(self) -> ($name, bool) { | ||||
| 				(!self, true) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_shl(self, shift32: u32) -> ($name, bool) { | ||||
| 				let $name(ref original) = self; | ||||
| 				let mut ret = [0u64; $n_words]; | ||||
| 				let shift = shift32 as usize; | ||||
| 				let word_shift = shift / 64; | ||||
| 				let bit_shift = shift % 64; | ||||
| 				for i in 0..$n_words { | ||||
| 					// Shift
 | ||||
| 					if i + word_shift < $n_words { | ||||
| 						ret[i + word_shift] += original[i] << bit_shift; | ||||
| 					} | ||||
| 					// Carry
 | ||||
| 					if bit_shift > 0 && i + word_shift + 1 < $n_words { | ||||
| 						ret[i + word_shift + 1] += original[i] >> (64 - bit_shift); | ||||
| 					} | ||||
| 				} | ||||
| 				// Detecting overflow
 | ||||
| 				let last = $n_words - word_shift - if bit_shift > 0 { 1 } else { 0 }; | ||||
| 				let overflow = if bit_shift > 0 { | ||||
| 					(original[last] >> (64 - bit_shift)) > 0 | ||||
| 				} else if word_shift > 0 { | ||||
| 					original[last] > 0 | ||||
| 				} else { | ||||
| 					false | ||||
| 				}; | ||||
| 
 | ||||
| 				for i in last+1..$n_words-1 { | ||||
| 					if original[i] > 0 { | ||||
| 						return ($name(ret), true); | ||||
| 					} | ||||
| 				} | ||||
| 				($name(ret), overflow) | ||||
| 			} | ||||
| 
 | ||||
| 			fn overflowing_shr(self, _shift32: u32) -> ($name, bool) { | ||||
| 				// TODO [todr] not used for now
 | ||||
| 				unimplemented!(); | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		impl Add<$name> for $name { | ||||
| 			type Output = $name; | ||||
| 
 | ||||
| @ -907,15 +926,17 @@ impl From<U256> for u32 { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub const ZERO_U256: U256 = U256([0x00u64; 4]); | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub const ONE_U256: U256 = U256([0x01u64, 0x00u64, 0x00u64, 0x00u64]); | ||||
| /// TODO [Gav Wood] Please document me
 | ||||
| pub const BAD_U256: U256 = U256([0xffffffffffffffffu64; 4]); | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
| 	use uint::{Uint, U128, U256, U512}; | ||||
| 	use std::str::FromStr; | ||||
| 	use std::num::wrapping::OverflowingOps; | ||||
| 
 | ||||
| 	#[test] | ||||
| 	pub fn assign_ops() { | ||||
| @ -1297,28 +1318,6 @@ mod tests { | ||||
| 		); | ||||
| 	} | ||||
| 
 | ||||
| 	#[ignore] | ||||
| 	#[test] | ||||
| 	pub fn uint256_shr_overflow() { | ||||
| 		assert_eq!( | ||||
| 			U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() | ||||
| 			.overflowing_shr(4), | ||||
| 			(U256::from_str("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) | ||||
| 		); | ||||
| 	} | ||||
| 
 | ||||
| 	#[ignore] | ||||
| 	#[test] | ||||
| 	pub fn uint256_shr_overflow2() { | ||||
| 		assert_eq!( | ||||
| 			U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0").unwrap() | ||||
| 			.overflowing_shr(4), | ||||
| 			(U256::from_str("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), false) | ||||
| 		); | ||||
| 	} | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 	#[test] | ||||
| 	pub fn uint256_mul() { | ||||
| 		assert_eq!( | ||||
|  | ||||
| @ -2,7 +2,9 @@ | ||||
| 
 | ||||
| use std::ptr; | ||||
| 
 | ||||
| /// TODO [debris] Please document me
 | ||||
| pub trait InsertSlice<T> { | ||||
| 				/// TODO [debris] Please document me
 | ||||
|     fn insert_slice(&mut self, index: usize, elements: &[T]); | ||||
| } | ||||
| 
 | ||||
| @ -47,6 +49,7 @@ impl<T> InsertSlice<T> for Vec<T> { | ||||
| ///	}
 | ||||
| /// ```
 | ||||
| pub trait SharedPrefix <T> { | ||||
| 	/// TODO [debris] Please document me
 | ||||
| 	fn shared_prefix_len(&self, elem: &[T]) -> usize; | ||||
| } | ||||
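A small sketch of the intended behaviour, assuming the trait is implemented for `Vec<T>` as its doc example above suggests:

    let a = vec![1u8, 2, 3, 4];
    assert_eq!(a.shared_prefix_len(&[1u8, 2, 7]), 2);  // first two elements match
    assert_eq!(a.shared_prefix_len(&[9u8]), 0);        // nothing in common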
| 
 | ||||
|  | ||||