Merge branch 'master' into miner-spec-refact

Conflicts:
	ethcore/src/client/client.rs
	ethcore/src/client/mod.rs
	ethcore/src/client/test_client.rs
	miner/src/miner.rs
Nikolay Volf 2016-05-19 03:51:05 +03:00
commit 6c6bbe9c57
57 changed files with 821 additions and 269 deletions

Cargo.lock (generated, 44 changed lines)

@ -2,7 +2,7 @@
name = "parity"
version = "1.2.0"
dependencies = [
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@ -119,7 +119,7 @@ dependencies = [
[[package]]
name = "clippy"
version = "0.0.64"
version = "0.0.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -147,11 +147,6 @@ dependencies = [
"url 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam"
version = "0.2.9"
@ -235,8 +230,8 @@ name = "ethcore"
version = "1.2.0"
dependencies = [
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.2.0",
"ethcore-devtools 1.2.0",
@ -296,7 +291,7 @@ dependencies = [
name = "ethcore-rpc"
version = "1.2.0"
dependencies = [
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.2.0",
"ethcore 1.2.0",
"ethcore-util 1.2.0",
@ -321,7 +316,7 @@ dependencies = [
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"bigint 0.1.0",
"chrono 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -354,15 +349,16 @@ dependencies = [
name = "ethcore-webapp"
version = "1.2.0"
dependencies = [
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-rpc 1.2.0",
"ethcore-util 1.2.0",
"hyper 0.9.3 (git+https://github.com/ethcore/hyper)",
"jsonrpc-core 2.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 5.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-status 0.3.7 (git+https://github.com/ethcore/parity-status.git)",
"parity-wallet 0.2.0 (git+https://github.com/ethcore/parity-wallet.git)",
"parity-idmanager 0.1.3 (git+https://github.com/ethcore/parity-idmanager-rs.git)",
"parity-status 0.4.1 (git+https://github.com/ethcore/parity-status.git)",
"parity-wallet 0.3.0 (git+https://github.com/ethcore/parity-wallet.git)",
"parity-webapp 0.1.0 (git+https://github.com/ethcore/parity-webapp.git)",
"url 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -383,7 +379,7 @@ dependencies = [
name = "ethminer"
version = "1.2.0"
dependencies = [
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.2.0",
"ethcore-util 1.2.0",
@ -397,7 +393,7 @@ dependencies = [
name = "ethsync"
version = "1.2.0"
dependencies = [
"clippy 0.0.64 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.67 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.2.0",
"ethcore-util 1.2.0",
@ -833,18 +829,26 @@ name = "odds"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "parity-idmanager"
version = "0.1.3"
source = "git+https://github.com/ethcore/parity-idmanager-rs.git#efb69592b87854f41d8882de75982c8f1e748666"
dependencies = [
"parity-webapp 0.1.0 (git+https://github.com/ethcore/parity-webapp.git)",
]
[[package]]
name = "parity-status"
version = "0.3.7"
source = "git+https://github.com/ethcore/parity-status.git#b0ae32a7fe2f843e4e22dc38903fd2c3e7fb0763"
version = "0.4.1"
source = "git+https://github.com/ethcore/parity-status.git#f121ebd1f49986545d9fc262ba210cdf07039e6d"
dependencies = [
"parity-webapp 0.1.0 (git+https://github.com/ethcore/parity-webapp.git)",
]
[[package]]
name = "parity-wallet"
version = "0.2.0"
source = "git+https://github.com/ethcore/parity-wallet.git#18a602fd25f3e9bcdbc5528bf61ba627665d962c"
version = "0.3.0"
source = "git+https://github.com/ethcore/parity-wallet.git#664fd2b85dd94ca184868bd3965e14a4ba68c03f"
dependencies = [
"parity-webapp 0.1.0 (git+https://github.com/ethcore/parity-webapp.git)",
]


@ -23,7 +23,7 @@ daemonize = "0.2"
num_cpus = "0.2"
number_prefix = "0.2"
rpassword = "0.2.1"
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
ethcore = { path = "ethcore" }
ethcore-util = { path = "util" }
ethsync = { path = "sync" }


@ -22,8 +22,8 @@ ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" }
num_cpus = "0.2"
clippy = { version = "0.0.64", optional = true}
crossbeam = "0.1.5"
clippy = { version = "0.0.67", optional = true}
crossbeam = "0.2.9"
lazy_static = "0.1"
ethcore-devtools = { path = "../devtools" }
ethjson = { path = "../json" }


@ -342,7 +342,6 @@ mod tests {
#[test]
fn new_account() {
use rustc_serialize::hex::ToHex;
let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
@ -354,7 +353,6 @@ mod tests {
#[test]
fn create_account() {
use rustc_serialize::hex::ToHex;
let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");


@ -21,7 +21,7 @@ use util::keys::store::AccountProvider;
use block::*;
use spec::{CommonParams, Spec};
use engine::*;
use evm::{Schedule, Factory};
use evm::Schedule;
use ethjson;
/// `BasicAuthority` params.
@ -51,7 +51,6 @@ pub struct BasicAuthority {
params: CommonParams,
our_params: BasicAuthorityParams,
builtins: BTreeMap<Address, Builtin>,
factory: Factory,
}
impl BasicAuthority {
@ -61,7 +60,6 @@ impl BasicAuthority {
params: params,
our_params: our_params,
builtins: builtins,
factory: Factory::default(),
}
}
}
@ -78,8 +76,6 @@ impl Engine for BasicAuthority {
/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> HashMap<String, String> { hash_map!["signature".to_owned() => "TODO".to_owned()] }
fn vm_factory(&self) -> &Factory { &self.factory }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule {
Schedule::new_homestead()
}
@ -200,7 +196,6 @@ mod tests {
use super::*;
use common::*;
use block::*;
use engine::*;
use tests::helpers::*;
use util::keys::{TestAccountProvider, TestAccount};
@ -211,12 +206,6 @@ mod tests {
assert!(engine.version().major >= 1);
}
#[test]
fn can_return_factory() {
let engine = new_test_authority().engine;
engine.vm_factory();
}
#[test]
fn can_return_schedule() {
let engine = new_test_authority().engine;
@ -288,7 +277,8 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let b = OpenBlock::new(engine.deref(), false, db, &genesis_header, last_hashes, addr.clone(), x!(3141562), vec![]);
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, addr.clone(), x!(3141562), vec![]);
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();


@ -23,6 +23,7 @@ use engine::*;
use state::*;
use verification::PreverifiedBlock;
use trace::Trace;
use evm::Factory as EvmFactory;
/// A block, encoded as it is on the block chain.
#[derive(Default, Debug, Clone)]
@ -190,6 +191,7 @@ impl IsBlock for ExecutedBlock {
pub struct OpenBlock<'x> {
block: ExecutedBlock,
engine: &'x Engine,
vm_factory: &'x EvmFactory,
last_hashes: LastHashes,
}
@ -226,10 +228,11 @@ pub struct SealedBlock {
impl<'x> OpenBlock<'x> {
#[cfg_attr(feature="dev", allow(too_many_arguments))]
/// Create a new `OpenBlock` ready for transaction pushing.
pub fn new(engine: &'x Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self {
pub fn new(engine: &'x Engine, vm_factory: &'x EvmFactory, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self {
let mut r = OpenBlock {
block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()), tracing),
engine: engine,
vm_factory: vm_factory,
last_hashes: last_hashes,
};
@ -308,7 +311,7 @@ impl<'x> OpenBlock<'x> {
let env_info = self.env_info();
// info!("env_info says gas_used={}", env_info.gas_used);
match self.block.state.apply(&env_info, self.engine, &t, self.block.traces.is_some()) {
match self.block.state.apply(&env_info, self.engine, self.vm_factory, &t, self.block.traces.is_some()) {
Ok(outcome) => {
self.block.transactions_set.insert(h.unwrap_or_else(||t.hash()));
self.block.base.transactions.push(t);
@ -393,13 +396,14 @@ impl ClosedBlock {
}
/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
pub fn reopen(self, engine: &Engine) -> OpenBlock {
pub fn reopen<'a>(self, engine: &'a Engine, vm_factory: &'a EvmFactory) -> OpenBlock<'a> {
// revert rewards (i.e. set state back at last transaction's state).
let mut block = self.block;
block.state = self.unclosed_state;
OpenBlock {
block: block,
engine: engine,
vm_factory: vm_factory,
last_hashes: self.last_hashes,
}
}
@ -457,7 +461,7 @@ impl IsBlock for SealedBlock {
/// Enact the block given by block header, transactions and uncles
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> {
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
{
if ::log::max_log_level() >= ::log::LogLevel::Trace {
let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce());
@ -465,7 +469,7 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
}
let mut b = OpenBlock::new(engine, tracing, db, parent, last_hashes, header.author().clone(), x!(3141562), header.extra_data().clone());
let mut b = OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, header.author().clone(), x!(3141562), header.extra_data().clone());
b.set_difficulty(*header.difficulty());
b.set_gas_limit(*header.gas_limit());
b.set_timestamp(header.timestamp());
@ -475,22 +479,22 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> {
pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
let block = BlockView::new(block_bytes);
let header = block.header();
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes)
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> {
pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
let view = BlockView::new(&block.bytes);
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes)
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<SealedBlock, Error> {
let header = BlockView::new(block_bytes).header_view();
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes)).seal(engine, header.seal())))
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory)).seal(engine, header.seal())))
}
#[cfg(test)]
@ -509,7 +513,8 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let b = OpenBlock::new(engine.deref(), false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let b = b.close_and_lock();
let _ = b.seal(engine.deref(), vec![]);
}
@ -524,14 +529,15 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let b = OpenBlock::new(engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]).close_and_lock().seal(engine.deref(), vec![]).unwrap();
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]).close_and_lock().seal(engine.deref(), vec![]).unwrap();
let orig_bytes = b.rlp_bytes();
let orig_db = b.drain();
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()]).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
assert_eq!(e.rlp_bytes(), orig_bytes);
@ -550,7 +556,8 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let mut open_block = OpenBlock::new(engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]);
let vm_factory = Default::default();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]);
let mut uncle1_header = Header::new();
uncle1_header.extra_data = b"uncle1".to_vec();
let mut uncle2_header = Header::new();
@ -565,7 +572,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()]).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
let bytes = e.rlp_bytes();
assert_eq!(bytes, orig_bytes);
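
Taken together, the block.rs hunks above move the EVM factory out of the engine and into the call sites: OpenBlock::new, ClosedBlock::reopen and the enact* helpers all take an explicit &EvmFactory now. A minimal sketch of the new call shape, built only from the signatures in this diff (the common::* prelude and the x! macro are the ones block.rs already uses); treat it as illustration, not code from the commit:

// Sketch: callers thread the factory through instead of asking the engine for it.
use common::*;
use engine::Engine;
use verification::PreverifiedBlock;
use block::{OpenBlock, LockedBlock, enact_verified};
use evm::Factory as EvmFactory;

fn open_block<'a>(engine: &'a Engine,
                  vm_factory: &'a EvmFactory,
                  db: Box<JournalDB>,
                  parent: &Header,
                  last_hashes: LastHashes,
                  author: Address) -> OpenBlock<'a> {
    // vm_factory is the new second argument; the rest is unchanged.
    OpenBlock::new(engine, vm_factory, false, db, parent, last_hashes,
                   author, x!(3141562), vec![])
}

fn import_block(block: &PreverifiedBlock,
                engine: &Engine,
                db: Box<JournalDB>,
                parent: &Header,
                last_hashes: LastHashes,
                vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
    // enact_verified gained the same trailing &EvmFactory parameter.
    enact_verified(block, engine, false, db, parent, last_hashes, vm_factory)
}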


@ -46,6 +46,7 @@ use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Databa
use trace;
pub use types::blockchain_info::BlockChainInfo;
pub use types::block_status::BlockStatus;
use evm::Factory as EvmFactory;
impl fmt::Display for BlockChainInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -87,6 +88,7 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
import_lock: Mutex<()>,
panic_handler: Arc<PanicHandler>,
verifier: PhantomData<V>,
vm_factory: Arc<EvmFactory>,
}
const HISTORY: u64 = 1200;
@ -151,6 +153,7 @@ impl<V> Client<V> where V: Verifier {
import_lock: Mutex::new(()),
panic_handler: panic_handler,
verifier: PhantomData,
vm_factory: Arc::new(EvmFactory::new(config.vm_type)),
})
}
@ -204,7 +207,7 @@ impl<V> Client<V> where V: Verifier {
let last_hashes = self.build_last_hashes(header.parent_hash.clone());
let db = self.state_db.lock().unwrap().boxed_clone();
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes);
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory);
if let Err(e) = enact_result {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
@ -422,7 +425,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
state.sub_balance(&sender, &balance);
state.add_balance(&sender, &U256::max_value());
let options = TransactOptions { tracing: false, check_nonce: false };
Executive::new(&mut state, &env_info, self.engine.deref().deref()).transact(t, options)
Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options)
}
// TODO [todr] Should be moved to miner crate eventually.
@ -430,6 +433,10 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
block.try_seal(self.engine.deref().deref(), seal)
}
fn vm_factory(&self) -> &EvmFactory {
&self.vm_factory
}
// TODO [todr] Should be moved to miner crate eventually.
fn prepare_sealing(&self, author: Address, gas_floor_target: U256, extra_data: Bytes, transactions: Vec<SignedTransaction>)
-> (Option<ClosedBlock>, HashSet<H256>) {
@ -439,6 +446,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
let mut b = OpenBlock::new(
engine,
&self.vm_factory,
false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
self.state_db.lock().unwrap().boxed_clone(),
match self.chain.block_header(&h) { Some(ref x) => x, None => { return (None, invalid_transactions) } },


@ -17,6 +17,7 @@
pub use block_queue::BlockQueueConfig;
pub use blockchain::BlockChainConfig;
pub use trace::{Config as TraceConfig, Switch};
pub use evm::VMType;
use util::journaldb;
/// Client configuration. Includes configs for all sub-systems.
@ -28,6 +29,8 @@ pub struct ClientConfig {
pub blockchain: BlockChainConfig,
/// Trace configuration.
pub tracing: TraceConfig,
/// VM type.
pub vm_type: VMType,
/// The JournalDB ("pruning") algorithm to use.
pub pruning: journaldb::Algorithm,
/// The name of the client instance.
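
ClientConfig now records which EVM backend the client should use; Client::new turns it into the Arc<EvmFactory> seen in the client.rs hunk above. A small sketch of picking the backend at configuration time; it assumes ClientConfig implements Default (not shown in this diff) and uses VMType::jit(), which is added in the evm factory hunk later in this commit:

// Sketch only: select the EVM backend up front (assumes ClientConfig: Default).
use client::{ClientConfig, VMType};

fn make_config() -> ClientConfig {
    let mut config = ClientConfig::default();
    // Prefer the JIT when the feature was compiled in, else the interpreter.
    config.vm_type = VMType::jit().unwrap_or(VMType::Interpreter);
    config
}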


@ -22,7 +22,7 @@ mod test_client;
mod trace;
pub use self::client::*;
pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig, Switch};
pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig, Switch, VMType};
pub use types::ids::*;
pub use self::test_client::{TestBlockChainClient, EachBlockWith};
pub use self::trace::Filter as TraceFilter;
@ -43,6 +43,7 @@ use filter::Filter;
use error::{ImportResult, ExecutionError};
use receipt::LocalizedReceipt;
use trace::LocalizedTrace;
use evm::Factory as EvmFactory;
/// Blockchain database client. Owns and manages a blockchain and a block queue.
pub trait BlockChainClient : Sync + Send {
@ -132,6 +133,9 @@ pub trait BlockChainClient : Sync + Send {
/// Makes a non-persistent transaction call.
fn call(&self, t: &SignedTransaction) -> Result<Executed, ExecutionError>;
/// Returns EvmFactory.
fn vm_factory(&self) -> &EvmFactory;
/// Returns traces matching given filter.
fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>>;
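
The trait change above gives every BlockChainClient consumer direct access to the client's EvmFactory, so the miner and RPC crates no longer need to reach it through the engine. A hedged sketch of that call path, using only the trait method added here and the Factory::create() calls visible in the executive.rs hunks:

// Sketch: any holder of a &BlockChainClient can now build an Evm from the
// client's own factory.
use client::BlockChainClient;

fn prepare_evm(client: &BlockChainClient) {
    let vm_factory = client.vm_factory();
    let _evm = vm_factory.create();   // same Factory::create() the executive uses
}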


@ -27,6 +27,7 @@ use log_entry::LocalizedLogEntry;
use receipt::{Receipt, LocalizedReceipt};
use extras::BlockReceipts;
use error::{ImportResult};
use evm::Factory as EvmFactory;
use block_queue::BlockQueueInfo;
use block::{SealedBlock, ClosedBlock, LockedBlock};
@ -429,6 +430,10 @@ impl BlockChainClient for TestBlockChainClient {
}
}
fn vm_factory(&self) -> &EvmFactory {
unimplemented!();
}
fn filter_traces(&self, _filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
unimplemented!();
}


@ -20,7 +20,7 @@ use common::*;
use util::keys::store::AccountProvider;
use block::ExecutedBlock;
use spec::CommonParams;
use evm::{Schedule, Factory};
use evm::Schedule;
/// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based.
/// Provides hooks into each of the major parts of block import.
@ -39,9 +39,6 @@ pub trait Engine : Sync + Send {
/// Get the general parameters of the chain.
fn params(&self) -> &CommonParams;
/// Get current EVM factory
fn vm_factory(&self) -> &Factory;
/// Get the EVM schedule for the given `env_info`.
fn schedule(&self, env_info: &EnvInfo) -> Schedule;


@ -21,7 +21,7 @@ use common::*;
use block::*;
use spec::CommonParams;
use engine::*;
use evm::{Schedule, Factory};
use evm::Schedule;
use ethjson;
/// Ethash params.
@ -64,7 +64,6 @@ pub struct Ethash {
ethash_params: EthashParams,
builtins: BTreeMap<Address, Builtin>,
pow: EthashManager,
factory: Factory,
}
impl Ethash {
@ -75,7 +74,6 @@ impl Ethash {
ethash_params: ethash_params,
builtins: builtins,
pow: EthashManager::new(),
factory: Factory::default(),
}
}
}
@ -93,10 +91,8 @@ impl Engine for Ethash {
}
/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }
fn vm_factory(&self) -> &Factory {
&self.factory
fn extra_info(&self, header: &Header) -> HashMap<String, String> {
hash_map!["nonce".to_owned() => format!("0x{}", header.nonce().hex()), "mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())]
}
fn schedule(&self, env_info: &EnvInfo) -> Schedule {
@ -299,7 +295,6 @@ mod tests {
use common::*;
use block::*;
use engine::*;
use tests::helpers::*;
use super::super::new_morden;
@ -312,7 +307,8 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let b = OpenBlock::new(engine.deref(), false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let b = b.close();
assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
}
@ -326,7 +322,8 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let mut b = OpenBlock::new(engine.deref(), false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let vm_factory = Default::default();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let mut uncle = Header::new();
let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
uncle.author = uncle_author.clone();
@ -344,12 +341,6 @@ mod tests {
assert!(engine.version().major >= 1);
}
#[test]
fn can_return_factory() {
let engine = new_morden().engine;
engine.vm_factory();
}
#[test]
fn can_return_schedule() {
let engine = new_morden().engine;


@ -51,7 +51,6 @@ pub fn new_morden() -> Spec { Spec::load(include_bytes!("../../res/ethereum/mord
mod tests {
use common::*;
use state::*;
use engine::*;
use super::*;
use tests::helpers::*;


@ -17,11 +17,10 @@
//! Evm factory.
//!
//! TODO: consider splitting it into two separate files.
#[cfg(test)]
use std::fmt;
use evm::Evm;
#[derive(Clone)]
#[derive(Debug, Clone)]
/// Type of EVM to use.
pub enum VMType {
/// JIT EVM
@ -31,7 +30,6 @@ pub enum VMType {
Interpreter
}
#[cfg(test)]
impl fmt::Display for VMType {
#[cfg(feature="jit")]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -48,8 +46,12 @@ impl fmt::Display for VMType {
}
}
#[cfg(test)]
#[cfg(feature = "json-tests")]
impl Default for VMType {
fn default() -> Self {
VMType::Interpreter
}
}
impl VMType {
/// Return all possible VMs (JIT, Interpreter)
#[cfg(feature = "jit")]
@ -62,6 +64,18 @@ impl VMType {
pub fn all() -> Vec<VMType> {
vec![VMType::Interpreter]
}
/// Return new jit if it's possible
#[cfg(not(feature = "jit"))]
pub fn jit() -> Option<Self> {
None
}
/// Return new jit if it's possible
#[cfg(feature = "jit")]
pub fn jit() -> Option<Self> {
Some(VMType::Jit)
}
}
/// Evm factory. Creates appropriate Evm.
@ -94,13 +108,13 @@ impl Factory {
}
/// Create new instance of specific `VMType` factory
#[cfg(test)]
pub fn new(evm: VMType) -> Factory {
pub fn new(evm: VMType) -> Self {
Factory {
evm: evm
}
}
}
impl Default for Factory {
/// Returns jitvm factory
#[cfg(feature = "jit")]
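
This hunk makes Factory::new(VMType) available outside tests (Client::new now calls EvmFactory::new(config.vm_type)), derives Debug for VMType, and adds VMType::jit(), which reports whether the JIT backend was compiled in. A small usage sketch assembled only from the items shown here:

// Sketch: build a factory for an explicit backend, falling back to the
// interpreter when the jit feature is absent.
use evm::{Factory, VMType};

fn pick_factory(prefer_jit: bool) -> Factory {
    match (prefer_jit, VMType::jit()) {
        (true, Some(jit)) => Factory::new(jit),
        _ => Factory::new(VMType::Interpreter),
    }
}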


@ -31,6 +31,5 @@ mod tests;
pub use self::evm::{Evm, Error, Result};
pub use self::ext::{Ext, ContractCreateResult, MessageCallResult};
pub use self::factory::Factory;
pub use self::factory::{Factory, VMType};
pub use self::schedule::Schedule;
pub use self::factory::VMType;


@ -18,7 +18,7 @@
use common::*;
use state::*;
use engine::*;
use evm::{self, Ext};
use evm::{self, Ext, Factory};
use externalities::*;
use substate::*;
use trace::{Trace, Tracer, NoopTracer, ExecutiveTracer};
@ -52,33 +52,36 @@ pub struct Executive<'a> {
state: &'a mut State,
info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
}
impl<'a> Executive<'a> {
/// Basic constructor.
pub fn new(state: &'a mut State, info: &'a EnvInfo, engine: &'a Engine) -> Self {
pub fn new(state: &'a mut State, info: &'a EnvInfo, engine: &'a Engine, vm_factory: &'a Factory) -> Self {
Executive {
state: state,
info: info,
engine: engine,
vm_factory: vm_factory,
depth: 0,
}
}
/// Populates executive from parent properties. Increments executive depth.
pub fn from_parent(state: &'a mut State, info: &'a EnvInfo, engine: &'a Engine, parent_depth: usize) -> Self {
pub fn from_parent(state: &'a mut State, info: &'a EnvInfo, engine: &'a Engine, vm_factory: &'a Factory, parent_depth: usize) -> Self {
Executive {
state: state,
info: info,
engine: engine,
vm_factory: vm_factory,
depth: parent_depth + 1,
}
}
/// Creates `Externalities` from `Executive`.
pub fn as_externalities<'_, T>(&'_ mut self, origin_info: OriginInfo, substate: &'_ mut Substate, output: OutputPolicy<'_, '_>, tracer: &'_ mut T) -> Externalities<'_, T> where T: Tracer {
Externalities::new(self.state, self.info, self.engine, self.depth, origin_info, substate, output, tracer)
Externalities::new(self.state, self.info, self.engine, self.vm_factory, self.depth, origin_info, substate, output, tracer)
}
/// This function should be used to execute transaction.
@ -179,8 +182,8 @@ impl<'a> Executive<'a> {
-> evm::Result where T: Tracer {
// Ordinary execution - keep VM in same thread
if (self.depth + 1) % MAX_VM_DEPTH_FOR_THREAD != 0 {
let vm_factory = self.vm_factory;
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer);
let vm_factory = self.engine.vm_factory();
trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
return vm_factory.create().exec(params, &mut ext);
}
@ -189,8 +192,8 @@ impl<'a> Executive<'a> {
// TODO [todr] No thread builder yet, so we need to reset once for a while
// https://github.com/aturon/crossbeam/issues/16
crossbeam::scope(|scope| {
let vm_factory = self.vm_factory;
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer);
let vm_factory = self.engine.vm_factory();
scope.spawn(move || {
vm_factory.create().exec(params, &mut ext)
@ -458,11 +461,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(0x100u64));
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params, &mut substate, &mut NoopTracer).unwrap()
};
@ -517,11 +520,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params, &mut substate, &mut NoopTracer).unwrap()
};
@ -572,12 +575,12 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
let info = EnvInfo::default();
let engine = TestEngine::new(5, factory);
let engine = TestEngine::new(5);
let mut substate = Substate::new();
let mut tracer = ExecutiveTracer::default();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let output = BytesRef::Fixed(&mut[0u8;0]);
ex.call(params, &mut substate, output, &mut tracer).unwrap()
};
@ -644,12 +647,12 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
let info = EnvInfo::default();
let engine = TestEngine::new(5, factory);
let engine = TestEngine::new(5);
let mut substate = Substate::new();
let mut tracer = ExecutiveTracer::default();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params.clone(), &mut substate, &mut tracer).unwrap()
};
@ -714,11 +717,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params, &mut substate, &mut NoopTracer).unwrap()
};
@ -766,11 +769,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
let info = EnvInfo::default();
let engine = TestEngine::new(1024, factory);
let engine = TestEngine::new(1024);
let mut substate = Substate::new();
{
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params, &mut substate, &mut NoopTracer).unwrap();
}
@ -827,11 +830,11 @@ mod tests {
state.add_balance(&sender, &U256::from(100_000));
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer).unwrap()
};
@ -872,11 +875,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.init_code(&address, code.clone());
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let gas_left = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer).unwrap()
};
@ -906,10 +909,10 @@ mod tests {
state.add_balance(&sender, &U256::from(18));
let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let executed = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts).unwrap()
};
@ -940,10 +943,10 @@ mod tests {
let mut state = state_result.reference_mut();
let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let res = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
};
@ -972,10 +975,10 @@ mod tests {
state.add_balance(&sender, &U256::from(17));
let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let res = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
};
@ -1006,10 +1009,10 @@ mod tests {
let mut info = EnvInfo::default();
info.gas_used = U256::from(20_000);
info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let res = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
};
@ -1040,10 +1043,10 @@ mod tests {
state.add_balance(&sender, &U256::from(100_017));
let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let res = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
};
@ -1074,11 +1077,11 @@ mod tests {
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap());
let info = EnvInfo::default();
let engine = TestEngine::new(0, factory);
let engine = TestEngine::new(0);
let mut substate = Substate::new();
let result = {
let mut ex = Executive::new(&mut state, &info, &engine);
let mut ex = Executive::new(&mut state, &info, &engine, &factory);
ex.create(params, &mut substate, &mut NoopTracer)
};
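
Every Executive and Externalities now carries the factory it was constructed with, so execution no longer goes through Engine::vm_factory, and TestEngine::new loses its factory argument. A minimal sketch of the updated entry point, using the constructor and TransactOptions exactly as they appear above; module paths mirror the use lines elsewhere in this diff, and the error type follows Client::call above:

// Sketch: executing a signed transaction with an explicitly supplied factory.
use common::*;
use state::State;
use engine::Engine;
use evm::Factory;
use executive::{Executive, TransactOptions};
use error::ExecutionError;

fn run_transaction(state: &mut State,
                   info: &EnvInfo,
                   engine: &Engine,
                   factory: &Factory,
                   t: &SignedTransaction) -> Result<(), ExecutionError> {
    // The factory is the new fourth constructor argument.
    let mut ex = Executive::new(state, info, engine, factory);
    let opts = TransactOptions { tracing: false, check_nonce: true };
    // transact returns Result<Executed, ExecutionError>, as used by Client::call above.
    let _executed = try!(ex.transact(t, opts));
    Ok(())
}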


@ -19,7 +19,7 @@ use common::*;
use state::*;
use engine::*;
use executive::*;
use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult};
use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory};
use substate::*;
use trace::Tracer;
@ -59,6 +59,7 @@ pub struct Externalities<'a, T> where T: 'a + Tracer {
state: &'a mut State,
env_info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
origin_info: OriginInfo,
substate: &'a mut Substate,
@ -74,6 +75,7 @@ impl<'a, T> Externalities<'a, T> where T: 'a + Tracer {
pub fn new(state: &'a mut State,
env_info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
origin_info: OriginInfo,
substate: &'a mut Substate,
@ -84,6 +86,7 @@ impl<'a, T> Externalities<'a, T> where T: 'a + Tracer {
state: state,
env_info: env_info,
engine: engine,
vm_factory: vm_factory,
depth: depth,
origin_info: origin_info,
substate: substate,
@ -146,7 +149,7 @@ impl<'a, T> Ext for Externalities<'a, T> where T: 'a + Tracer {
};
self.state.inc_nonce(&self.origin_info.address);
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth);
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth);
// TODO: handle internal error separately
match ex.create(params, self.substate, self.tracer) {
@ -185,7 +188,7 @@ impl<'a, T> Ext for Externalities<'a, T> where T: 'a + Tracer {
params.value = ActionValue::Transfer(value);
}
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth);
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth);
match ex.call(params, self.substate, BytesRef::Fixed(output), self.tracer) {
Ok(gas_left) => MessageCallResult::Success(gas_left),
@ -347,7 +350,8 @@ mod tests {
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
assert_eq!(ext.env_info().number, 100);
}
@ -358,7 +362,8 @@ mod tests {
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let hash = ext.blockhash(&U256::from_str("0000000000000000000000000000000000000000000000000000000000120000").unwrap());
@ -379,7 +384,8 @@ mod tests {
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let hash = ext.blockhash(&U256::from_str("0000000000000000000000000000000000000000000000000000000000120000").unwrap());
@ -393,7 +399,8 @@ mod tests {
let state = setup.state.reference_mut();
let mut tracer = NoopTracer;
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let mut output = vec![];
@ -418,7 +425,8 @@ mod tests {
let mut tracer = NoopTracer;
{
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
ext.log(log_topics, &log_data);
}
@ -434,7 +442,8 @@ mod tests {
let mut tracer = NoopTracer;
{
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
let vm_factory = Default::default();
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, &vm_factory, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer);
ext.suicide(&refund_account);
}


@ -61,7 +61,7 @@ pub struct Header {
/// Block difficulty.
pub difficulty: U256,
/// Block seal.
/// Vector of post-RLP-encoded fields.
pub seal: Vec<Bytes>,
/// The memoized hash of the RLP representation *including* the seal fields.
@ -275,4 +275,32 @@ impl Encodable for Header {
#[cfg(test)]
mod tests {
use rustc_serialize::hex::FromHex;
use util::rlp::{decode, encode};
use super::Header;
#[test]
fn test_header_seal_fields() {
// that's rlp of block header created with ethash engine.
let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap();
let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap();
let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap();
let header: Header = decode(&header_rlp);
let seal_fields = header.seal;
assert_eq!(seal_fields.len(), 2);
assert_eq!(seal_fields[0], mix_hash);
assert_eq!(seal_fields[1], nonce);
}
#[test]
fn decode_and_encode_header() {
// that's rlp of block header created with ethash engine.
let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap();
let header: Header = decode(&header_rlp);
let encoded_header = encode(&header).to_vec();
assert_eq!(header_rlp, encoded_header);
}
}


@ -58,6 +58,7 @@ impl<'a, T> TestExt<'a, T> where T: 'a + Tracer {
fn new(state: &'a mut State,
info: &'a EnvInfo,
engine: &'a Engine,
vm_factory: &'a Factory,
depth: usize,
origin_info: OriginInfo,
substate: &'a mut Substate,
@ -66,7 +67,7 @@ impl<'a, T> TestExt<'a, T> where T: 'a + Tracer {
tracer: &'a mut T) -> Self {
TestExt {
contract_address: contract_address(&address, &state.nonce(&address)),
ext: Externalities::new(state, info, engine, depth, origin_info, substate, output, tracer),
ext: Externalities::new(state, info, engine, vm_factory, depth, origin_info, substate, output, tracer),
callcreates: vec![]
}
}
@ -179,7 +180,8 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
let mut state = state_result.reference_mut();
state.populate_from(From::from(vm.pre_state.clone()));
let info = From::from(vm.env);
let engine = TestEngine::new(1, Factory::new(vm_type.clone()));
let engine = TestEngine::new(1);
let vm_factory = Factory::new(vm_type.clone());
let params = ActionParams::from(vm.transaction);
let mut substate = Substate::new();
@ -192,6 +194,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
&mut state,
&info,
&engine,
&vm_factory,
0,
OriginInfo::from(&params),
&mut substate,
@ -199,7 +202,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
params.address.clone(),
&mut tracer,
);
let evm = engine.vm_factory().create();
let evm = vm_factory.create();
let res = evm.exec(params, &mut ex);
(res, ex.callcreates)
};


@ -63,7 +63,8 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
let mut state = state_result.reference_mut();
state.populate_from(pre);
state.commit();
let res = state.apply(&env, engine.deref(), &transaction, false);
let vm_factory = Default::default();
let res = state.apply(&env, engine.deref(), &vm_factory, &transaction, false);
if fail_unless(state.root() == &post_state_root) {
println!("!!! {}: State mismatch (got: {}, expect: {}):", name, state.root(), post_state_root);


@ -27,6 +27,10 @@
#![cfg_attr(feature="dev", allow(clone_on_copy))]
// In most cases it expresses function flow better
#![cfg_attr(feature="dev", allow(if_not_else))]
// TODO [todr] a lot of warnings to be fixed
#![cfg_attr(feature="dev", allow(needless_borrow))]
#![cfg_attr(feature="dev", allow(assign_op_pattern))]
//! Ethcore library
//!


@ -19,14 +19,13 @@ use util::hash::Address;
use builtin::Builtin;
use engine::Engine;
use spec::CommonParams;
use evm::{Schedule, Factory};
use evm::Schedule;
use env_info::EnvInfo;
/// An engine which does not provide any consensus mechanism.
pub struct NullEngine {
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
factory: Factory,
}
impl NullEngine {
@ -35,16 +34,11 @@ impl NullEngine {
NullEngine{
params: params,
builtins: builtins,
factory: Factory::default()
}
}
}
impl Engine for NullEngine {
fn vm_factory(&self) -> &Factory {
&self.factory
}
fn name(&self) -> &str {
"NullEngine"
}


@ -17,6 +17,7 @@
use common::*;
use engine::Engine;
use executive::{Executive, TransactOptions};
use evm::Factory as EvmFactory;
use account_db::*;
use trace::Trace;
#[cfg(test)]
@ -218,11 +219,11 @@ impl State {
/// Execute a given transaction.
/// This will change the state accordingly.
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, vm_factory: &EvmFactory, t: &SignedTransaction, tracing: bool) -> ApplyResult {
// let old = self.to_pod();
let options = TransactOptions { tracing: tracing, check_nonce: true };
let e = try!(Executive::new(self, env_info, engine).transact(t, options));
let e = try!(Executive::new(self, env_info, engine, vm_factory).transact(t, options));
// TODO uncomment once to_pod() works correctly.
// trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod()));
@ -358,12 +359,9 @@ mod tests {
use super::*;
use util::common::*;
use util::trie::*;
use util::rlp::*;
use account::*;
use tests::helpers::*;
use devtools::*;
use evm::factory::*;
use env_info::*;
use spec::*;
use transaction::*;
@ -380,7 +378,7 @@ fn should_apply_create_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -392,7 +390,8 @@ fn should_apply_create_transaction() {
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Create(trace::Create {
@ -440,7 +439,7 @@ fn should_trace_failed_create_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -452,7 +451,8 @@ fn should_trace_failed_create_transaction() {
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Create(trace::Create {
@ -477,7 +477,7 @@ fn should_trace_call_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -490,7 +490,8 @@ fn should_trace_call_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -519,7 +520,7 @@ fn should_trace_basic_call_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -531,7 +532,8 @@ fn should_trace_basic_call_transaction() {
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -571,7 +573,8 @@ fn should_trace_call_transaction_to_builtin() {
data: vec![],
}.sign(&"".sha3());
let result = state.apply(&info, engine.deref(), &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
assert_eq!(result.trace, Some(Trace {
depth: 0,
@ -611,7 +614,8 @@ fn should_not_trace_subcall_transaction_to_builtin() {
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("600060006000600060006001610be0f1").unwrap());
let result = state.apply(&info, engine.deref(), &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
@ -653,7 +657,8 @@ fn should_not_trace_callcode() {
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b611000f2").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
let result = state.apply(&info, engine.deref(), &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
@ -698,7 +703,8 @@ fn should_not_trace_delegatecall() {
state.init_code(&x!(0xa), FromHex::from_hex("6000600060006000600b618000f4").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
let result = state.apply(&info, engine.deref(), &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
@ -727,7 +733,7 @@ fn should_trace_failed_call_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -740,7 +746,8 @@ fn should_trace_failed_call_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -768,7 +775,7 @@ fn should_trace_call_with_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -782,7 +789,8 @@ fn should_trace_call_with_subcall_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -825,7 +833,7 @@ fn should_trace_call_with_basic_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -838,7 +846,8 @@ fn should_trace_call_with_basic_subcall_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006045600b6000f1").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -878,7 +887,7 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -891,7 +900,8 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds.
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -920,7 +930,7 @@ fn should_trace_failed_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -934,7 +944,8 @@ fn should_trace_failed_subcall_transaction() {
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -974,7 +985,7 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -989,7 +1000,8 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
state.init_code(&x!(0xb), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap());
state.init_code(&x!(0xc), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
@ -1046,7 +1058,7 @@ fn should_trace_failed_subcall_with_subcall_transaction() {
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
let engine = TestEngine::new(5, Factory::default());
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
@ -1061,7 +1073,8 @@ fn should_trace_failed_subcall_with_subcall_transaction() {
state.init_code(&x!(0xb), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap());
state.init_code(&x!(0xc), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
let result = state.apply(&info, &engine, &t, true).unwrap();
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {

View File

@ -19,7 +19,7 @@ use common::*;
use spec::*;
use blockchain::{BlockChain, BlockChainConfig};
use state::*;
use evm::{Schedule, Factory};
use evm::Schedule;
use engine::*;
use ethereum;
use devtools::*;
@ -51,15 +51,13 @@ impl<T> GuardedTempResult<T> {
}
pub struct TestEngine {
factory: Factory,
engine: Box<Engine>,
max_depth: usize
}
impl TestEngine {
pub fn new(max_depth: usize, factory: Factory) -> TestEngine {
pub fn new(max_depth: usize) -> TestEngine {
TestEngine {
factory: factory,
engine: ethereum::new_frontier_test().engine,
max_depth: max_depth
}
@ -79,10 +77,6 @@ impl Engine for TestEngine {
self.engine.builtins()
}
fn vm_factory(&self) -> &Factory {
&self.factory
}
fn schedule(&self, _env_info: &EnvInfo) -> Schedule {
let mut schedule = Schedule::new_frontier();
schedule.max_depth = self.max_depth;
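// A minimal sketch of the calling convention this removal implies, using only
// calls that appear elsewhere in this commit (the state tests above): the EVM
// factory is no longer owned by the engine, so callers construct one and pass
// it explicitly. This is a fragment, not a standalone test; `state`, `info`
// and `t` are set up exactly as in those tests.
let engine = TestEngine::new(5);                 // no evm::Factory argument any more
let vm_factory = Default::default();             // assumed: evm::Factory implements Default
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();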

View File

@ -22,7 +22,7 @@ use std::sync::{RwLock, Arc};
use std::path::Path;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{FixedHash, H256, H264, Database, DBTransaction};
use util::{H256, H264, Database, DBTransaction};
use header::BlockNumber;
use trace::{BlockTraces, LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest,
DatabaseExtras};

View File

@ -19,7 +19,6 @@
use util::hash::H256;
use header::BlockNumber;
use ipc::binary::BinaryConvertError;
use ipc::binary::BinaryConvertable;
use std::mem;
use std::collections::VecDeque;

View File

@ -301,11 +301,11 @@ impl<'a> HeaderView<'a> {
/// Returns block extra data.
pub fn extra_data(&self) -> Bytes { self.rlp.val_at(12) }
/// Returns block seal.
/// Returns a vector of seal fields, each still in its raw RLP-encoded form.
pub fn seal(&self) -> Vec<Bytes> {
let mut seal = vec![];
for i in 13..self.rlp.item_count() {
seal.push(self.rlp.val_at(i));
seal.push(self.rlp.at(i).as_raw().to_vec());
}
seal
}
@ -316,3 +316,25 @@ impl<'a> Hashable for HeaderView<'a> {
self.rlp.as_raw().sha3()
}
}
#[cfg(test)]
mod tests {
use rustc_serialize::hex::FromHex;
use util::rlp::View;
use super::BlockView;
#[test]
fn test_header_view_seal_fields() {
// This is the RLP of a block created with the Ethash engine.
let block_rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap();
let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap();
let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap();
let block_view = BlockView::new(&block_rlp);
let header_view = block_view.header_view();
let seal_fields = header_view.seal();
assert_eq!(seal_fields.len(), 2);
assert_eq!(seal_fields[0], mix_hash);
assert_eq!(seal_fields[1], nonce);
}
}
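// Two extra assertions one could add to the test above to make the raw
// encoding explicit: each returned field still carries its RLP string header,
// so the mix hash comes back as 33 bytes (0xa0 prefix + 32-byte hash) and the
// nonce as 9 bytes (0x88 prefix + 8-byte nonce).
assert_eq!(seal_fields[0].len(), 33);
assert_eq!(seal_fields[1].len(), 9);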

View File

@ -33,7 +33,6 @@ use syntax::ast::{
use syntax::ast;
use syntax::codemap::Span;
use syntax::ext::base::{Annotatable, ExtCtxt};
use syntax::ext::build::AstBuilder;
use syntax::ptr::P;
pub struct Error;

View File

@ -252,14 +252,14 @@ fn binary_expr_struct(
map_stmts.push(quote_stmt!(cx, map[$field_index] = total;).unwrap());
if ::syntax::print::pprust::ty_to_string(&codegen::strip_ptr(&field.ty)) == "u8" {
map_stmts.push(quote_stmt!(cx, total = total + 1;).unwrap());
map_stmts.push(quote_stmt!(cx, total += 1;).unwrap());
}
else {
map_stmts.push(quote_stmt!(cx, let size = match $field_type_ident_qualified::len_params() {
0 => mem::size_of::<$field_type_ident>(),
_ => length_stack.pop_front().unwrap(),
}).unwrap());
map_stmts.push(quote_stmt!(cx, total = total + size;).unwrap());
map_stmts.push(quote_stmt!(cx, total += size;).unwrap());
}
};

View File

@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.7.0"
serde_json = "0.7.0"
serde_macros = { version = "0.7.0", optional = true }
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
[build-dependencies]
serde_codegen = { version = "0.7.0", optional = true }

View File

@ -17,7 +17,7 @@ log = "0.3"
env_logger = "0.3"
rustc-serialize = "0.3"
rayon = "0.3.1"
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
[features]
default = []

View File

@ -123,7 +123,7 @@ impl Miner {
// add transactions to old_block
let e = self.engine();
let mut invalid_transactions = HashSet::new();
let mut block = old_block.reopen(e);
let mut block = old_block.reopen(e, chain.vm_factory());
let block_number = block.block().fields().header.number();
// TODO: push new uncles, too.
@ -277,7 +277,8 @@ impl MinerService for Miner {
state.sub_balance(&sender, &balance);
state.add_balance(&sender, &U256::max_value());
let options = TransactOptions { tracing: false, check_nonce: false };
Executive::new(&mut state, &env_info, self.engine()).transact(t, options)
Executive::new(&mut state, &env_info, self.engine(), chain.vm_factory()).transact(t, options)
},
None => {
chain.call(t)

View File

@ -535,12 +535,12 @@ impl TransactionQueue {
/// Update height of all transactions in future transactions set.
fn update_future(&mut self, sender: &Address, current_nonce: U256) {
// We need to drain all transactions for the current sender from `future` and reinsert them with updated heights
let all_nonces_from_sender = match self.future.by_address.row(&sender) {
let all_nonces_from_sender = match self.future.by_address.row(sender) {
Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
None => vec![],
};
for k in all_nonces_from_sender {
let order = self.future.drop(&sender, &k).unwrap();
let order = self.future.drop(sender, &k).unwrap();
if k >= current_nonce {
self.future.insert(*sender, k, order.update_height(k, current_nonce));
} else {
@ -554,14 +554,14 @@ impl TransactionQueue {
/// Drop all transactions from given sender from `current`.
/// Either moves them to `future` or removes them from queue completely.
fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) {
let all_nonces_from_sender = match self.current.by_address.row(&sender) {
let all_nonces_from_sender = match self.current.by_address.row(sender) {
Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
None => vec![],
};
for k in all_nonces_from_sender {
// Goes to future or is removed
let order = self.current.drop(&sender, &k).unwrap();
let order = self.current.drop(sender, &k).unwrap();
if k >= current_nonce {
self.future.insert(*sender, k, order.update_height(k, current_nonce));
} else {
@ -803,7 +803,7 @@ mod test {
fn new_tx() -> SignedTransaction {
let keypair = KeyPair::create().unwrap();
new_unsigned_tx(U256::from(123)).sign(&keypair.secret())
new_unsigned_tx(U256::from(123)).sign(keypair.secret())
}
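// A minimal sketch of feeding the queue, written as a fragment in the style of
// the tests below and using only calls that appear in them; `default_nonce` is
// the tests' nonce-lookup helper, passed by reference exactly as they do.
let mut txq = TransactionQueue::new();
let tx = new_tx();
txq.add(tx, &default_nonce, TransactionOrigin::External).unwrap();
assert_eq!(txq.status().pending, 1);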
@ -1173,9 +1173,9 @@ mod test {
let mut txq = TransactionQueue::new();
let kp = KeyPair::create().unwrap();
let secret = kp.secret();
let tx = new_unsigned_tx(U256::from(123)).sign(&secret);
let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret);
let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret);
let tx = new_unsigned_tx(U256::from(123)).sign(secret);
let tx1 = new_unsigned_tx(U256::from(124)).sign(secret);
let tx2 = new_unsigned_tx(U256::from(125)).sign(secret);
txq.add(tx, &default_nonce, TransactionOrigin::External).unwrap();
assert_eq!(txq.status().pending, 1);
@ -1403,11 +1403,11 @@ mod test {
// given
let mut txq = TransactionQueue::new();
let keypair = KeyPair::create().unwrap();
let tx = new_unsigned_tx(U256::from(123)).sign(&keypair.secret());
let tx = new_unsigned_tx(U256::from(123)).sign(keypair.secret());
let tx2 = {
let mut tx2 = tx.deref().clone();
tx2.gas_price = U256::from(200);
tx2.sign(&keypair.secret())
tx2.sign(keypair.secret())
};
// when
@ -1426,16 +1426,16 @@ mod test {
// given
let mut txq = TransactionQueue::new();
let keypair = KeyPair::create().unwrap();
let tx0 = new_unsigned_tx(U256::from(123)).sign(&keypair.secret());
let tx0 = new_unsigned_tx(U256::from(123)).sign(keypair.secret());
let tx1 = {
let mut tx1 = tx0.deref().clone();
tx1.nonce = U256::from(124);
tx1.sign(&keypair.secret())
tx1.sign(keypair.secret())
};
let tx2 = {
let mut tx2 = tx1.deref().clone();
tx2.gas_price = U256::from(200);
tx2.sign(&keypair.secret())
tx2.sign(keypair.secret())
};
// when

View File

@ -140,6 +140,9 @@ Footprint Options:
the entire system, overrides other cache and queue
options.
Virtual Machine Options:
--jitvm Enable the JIT VM.
Legacy Options:
--geth Run in Geth-compatibility mode. Currently just sets
the IPC path to be the same as Geth's. Overrides
@ -222,6 +225,7 @@ pub struct Args {
pub flag_tx_limit: usize,
pub flag_logging: Option<String>,
pub flag_version: bool,
pub flag_jitvm: bool,
// legacy...
pub flag_geth: bool,
pub flag_nodekey: Option<String>,

View File

@ -26,7 +26,7 @@ use die::*;
use util::*;
use util::keys::store::AccountService;
use util::network_settings::NetworkSettings;
use ethcore::client::{append_path, get_db_path, ClientConfig, Switch};
use ethcore::client::{append_path, get_db_path, ClientConfig, Switch, VMType};
use ethcore::ethereum;
use ethcore::spec::Spec;
use ethsync::SyncConfig;
@ -171,7 +171,7 @@ impl Configuration {
let (listen, public) = self.net_addresses();
ret.listen_address = listen;
ret.public_address = public;
ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3()));
ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(s).unwrap_or_else(|_| s.sha3()));
ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover;
ret.ideal_peers = self.max_peers();
let mut net_path = PathBuf::from(&self.path());
@ -185,7 +185,7 @@ impl Configuration {
let mut latest_era = None;
let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted];
for i in jdb_types.into_iter() {
let db = journaldb::new(&append_path(&get_db_path(&Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i);
let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i);
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
match (latest_era, db.latest_era()) {
(Some(best), Some(this)) if best >= this => {}
@ -201,6 +201,7 @@ impl Configuration {
pub fn client_config(&self, spec: &Spec) -> ClientConfig {
let mut client_config = ClientConfig::default();
match self.args.flag_cache {
Some(mb) => {
client_config.blockchain.max_cache_size = mb * 1024 * 1024;
@ -211,12 +212,14 @@ impl Configuration {
client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
}
}
client_config.tracing.enabled = match self.args.flag_tracing.as_str() {
"auto" => Switch::Auto,
"on" => Switch::On,
"off" => Switch::Off,
_ => { die!("Invalid tracing method given!") }
};
client_config.pruning = match self.args.flag_pruning.as_str() {
"archive" => journaldb::Algorithm::Archive,
"light" => journaldb::Algorithm::EarlyMerge,
@ -225,6 +228,11 @@ impl Configuration {
"auto" => self.find_best_db(spec).unwrap_or(journaldb::Algorithm::OverlayRecent),
_ => { die!("Invalid pruning method given."); }
};
if self.args.flag_jitvm {
client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity built without jit vm."))
}
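// A minimal sketch of the flag-to-VM mapping introduced above, assuming that
// `VMType::jit()` returns an Option (None when the JIT feature was compiled
// out, hence the `unwrap_or_else`) and that `VMType` implements Default for
// the interpreter; without --jitvm the ClientConfig default is left untouched.
fn vm_type_from_flag(flag_jitvm: bool) -> VMType {
    if flag_jitvm {
        VMType::jit().unwrap_or_else(|| die!("Parity built without jit vm."))
    } else {
        VMType::default()
    }
}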
trace!(target: "parity", "Using pruning strategy of {}", client_config.pruning);
client_config.name = self.args.flag_identity.clone();
client_config.queue.max_mem_use = self.args.flag_queue_max_size;
@ -251,7 +259,7 @@ impl Configuration {
let account_service = AccountService::with_security(Path::new(&self.keys_path()), self.keys_iterations());
if let Some(ref unlocks) = self.args.flag_unlock {
for d in unlocks.split(',') {
let a = Address::from_str(clean_0x(&d)).unwrap_or_else(|_| {
let a = Address::from_str(clean_0x(d)).unwrap_or_else(|_| {
die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d)
});
if passwords.iter().find(|p| account_service.unlock_account_no_expire(&a, p).is_ok()).is_none() {
@ -302,7 +310,7 @@ impl Configuration {
pub fn directories(&self) -> Directories {
let db_path = Configuration::replace_home(
&self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
::std::fs::create_dir_all(&db_path).unwrap_or_else(|e| die_with_io_error("main", e));
let keys_path = Configuration::replace_home(&self.args.flag_keys_path);

View File

@ -18,7 +18,6 @@ use std::sync::{RwLock,Arc};
use std::ops::*;
use ipc::IpcConfig;
use std::collections::HashMap;
use ipc::BinaryConvertable;
use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::VecDeque;

View File

@ -138,7 +138,7 @@ fn execute_client(conf: Configuration) {
// Build client
let mut service = ClientService::start(
client_config, spec, net_settings, &Path::new(&conf.path())
client_config, spec, net_settings, Path::new(&conf.path())
).unwrap_or_else(|e| die_with_error("Client", e));
panic_handler.forward_from(&service);

View File

@ -37,7 +37,7 @@ impl PriceInfo {
.and_then(|json| json.find_path(&["result", "ethusd"])
.and_then(|obj| match *obj {
Json::String(ref s) => Some(PriceInfo {
ethusd: FromStr::from_str(&s).unwrap()
ethusd: FromStr::from_str(s).unwrap()
}),
_ => None
}))

View File

@ -31,7 +31,7 @@ pub fn setup_log(init: &Option<String>) -> Arc<RotatingLogger> {
if env::var("RUST_LOG").is_ok() {
let lvl = &env::var("RUST_LOG").unwrap();
levels.push_str(&lvl);
levels.push_str(lvl);
levels.push_str(",");
builder.parse(lvl);
}

View File

@ -87,7 +87,7 @@ fn upgrade_from_version(previous_version: &Version) -> Result<usize, Error> {
if upgrade_key.is_applicable(previous_version, &current_version) {
let upgrade_script = upgrades[upgrade_key];
try!(upgrade_script());
count = count + 1;
count += 1;
}
}
Ok(count)

View File

@ -22,7 +22,7 @@ ethminer = { path = "../miner" }
rustc-serialize = "0.3"
transient-hashmap = "0.1"
serde_macros = { version = "0.7.0", optional = true }
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
[build-dependencies]

View File

@ -27,7 +27,7 @@ use jsonrpc_core::*;
use util::numbers::*;
use util::sha3::*;
use util::bytes::{ToPretty};
use util::rlp::{encode, UntrustedRlp, View};
use util::rlp::{encode, decode, UntrustedRlp, View};
use ethcore::client::*;
use ethcore::block::IsBlock;
use ethcore::views::*;
@ -97,7 +97,7 @@ impl<C, S, A, M, EM> EthClient<C, S, A, M, EM>
timestamp: U256::from(view.timestamp()),
difficulty: view.difficulty(),
total_difficulty: total_difficulty,
seal_fields: view.seal().into_iter().map(Bytes::new).collect(),
seal_fields: view.seal().into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
uncles: block_view.uncle_hashes(),
transactions: {
if include_txs {
@ -142,7 +142,7 @@ impl<C, S, A, M, EM> EthClient<C, S, A, M, EM>
total_difficulty: uncle.difficulty + parent_difficulty,
receipts_root: uncle.receipts_root,
extra_data: Bytes::new(uncle.extra_data),
seal_fields: uncle.seal.into_iter().map(Bytes::new).collect(),
seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
uncles: vec![],
transactions: BlockTransactions::Hashes(vec![]),
};
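// Note on the `decode` calls above: `HeaderView::seal()` now returns each seal
// field still wrapped in its RLP string header (see the views.rs change earlier
// in this commit), so the RPC layer decodes the bare payload before wrapping it
// in `Bytes`. For the nonce, for instance, the raw field is 0x88 followed by
// the eight nonce bytes, and `decode(&f)` (its type inferred from `Bytes::new`,
// presumably `Vec<u8>`) yields just those eight bytes.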

View File

@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io"]
[dependencies]
ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
ethminer = { path = "../miner" }
log = "0.3"
env_logger = "0.3"

413
sync/src/blocks.rs Normal file
View File

@ -0,0 +1,413 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use ethcore::header::{ Header as BlockHeader};
known_heap_size!(0, HeaderId, SyncBlock);
/// Block data with optional body.
struct SyncBlock {
header: Bytes,
body: Option<Bytes>,
}
/// Used to identify a header by its transactions root and uncles hash
#[derive(Eq, PartialEq, Hash)]
struct HeaderId {
transactions_root: H256,
uncles: H256
}
/// A collection of blocks and subchain pointers being downloaded. This keeps track of
/// which headers/bodies need to be downloaded, which are being downloaded and also holds
/// the downloaded blocks.
pub struct BlockCollection {
/// Heads of subchains to download
heads: Vec<H256>,
/// Downloaded blocks.
blocks: HashMap<H256, SyncBlock>,
/// Downloaded blocks by parent.
parents: HashMap<H256, H256>,
/// Used to map body to header.
header_ids: HashMap<HeaderId, H256>,
/// First block in `blocks`.
head: Option<H256>,
/// Set of block header hashes being downloaded
downloading_headers: HashSet<H256>,
/// Set of block bodies being downloaded identified by block hash.
downloading_bodies: HashSet<H256>,
}
impl BlockCollection {
/// Create a new instance.
pub fn new() -> BlockCollection {
BlockCollection {
blocks: HashMap::new(),
header_ids: HashMap::new(),
heads: Vec::new(),
parents: HashMap::new(),
head: None,
downloading_headers: HashSet::new(),
downloading_bodies: HashSet::new(),
}
}
/// Clear everything.
pub fn clear(&mut self) {
self.blocks.clear();
self.parents.clear();
self.header_ids.clear();
self.heads.clear();
self.head = None;
self.downloading_headers.clear();
self.downloading_bodies.clear();
}
/// Reset collection for a new sync round with given subchain block hashes.
pub fn reset_to(&mut self, hashes: Vec<H256>) {
self.clear();
self.heads = hashes;
}
/// Insert a set of headers into collection and advance subchain head pointers.
pub fn insert_headers(&mut self, headers: Vec<Bytes>) {
for h in headers.into_iter() {
if let Err(e) = self.insert_header(h) {
trace!(target: "sync", "Ignored invalid header: {:?}", e);
}
}
self.update_heads();
}
/// Insert a collection of block bodies for previously downloaded headers.
pub fn insert_bodies(&mut self, bodies: Vec<Bytes>) {
for b in bodies.into_iter() {
if let Err(e) = self.insert_body(b) {
trace!(target: "sync", "Ignored invalid body: {:?}", e);
}
}
}
/// Returns a set of block hashes that require a body download. The returned set is marked as being downloaded.
pub fn needed_bodies(&mut self, count: usize, _ignore_downloading: bool) -> Vec<H256> {
if self.head.is_none() {
return Vec::new();
}
let mut needed_bodies: Vec<H256> = Vec::new();
let mut head = self.head;
while head.is_some() && needed_bodies.len() < count {
head = self.parents.get(&head.unwrap()).cloned();
if let Some(head) = head {
match self.blocks.get(&head) {
Some(block) if block.body.is_none() && !self.downloading_bodies.contains(&head) => {
needed_bodies.push(head.clone());
}
_ => (),
}
}
}
self.downloading_bodies.extend(needed_bodies.iter());
needed_bodies
}
/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
pub fn needed_headers(&mut self, count: usize, ignore_downloading: bool) -> Option<(H256, usize)> {
// find subchain to download
let mut download = None;
{
for h in &self.heads {
if ignore_downloading || !self.downloading_headers.contains(&h) {
self.downloading_headers.insert(h.clone());
download = Some(h.clone());
break;
}
}
}
download.map(|h| (h, count))
}
/// Unmark a header as being downloaded.
pub fn clear_header_download(&mut self, hash: &H256) {
self.downloading_headers.remove(hash);
}
/// Unmark a block body as being downloaded.
pub fn clear_body_download(&mut self, hash: &H256) {
self.downloading_bodies.remove(hash);
}
/// Get a valid chain of blocks, in descending order, ready for importing into the blockchain.
pub fn drain(&mut self) -> Vec<Bytes> {
if self.blocks.is_empty() || self.head.is_none() {
return Vec::new();
}
let mut drained = Vec::new();
let mut hashes = Vec::new();
{
let mut blocks = Vec::new();
let mut head = self.head;
while head.is_some() {
head = self.parents.get(&head.unwrap()).cloned();
if let Some(head) = head {
match self.blocks.get(&head) {
Some(block) if block.body.is_some() => {
blocks.push(block);
hashes.push(head);
self.head = Some(head);
}
_ => break,
}
}
}
for block in blocks.drain(..) {
let mut block_rlp = RlpStream::new_list(3);
block_rlp.append_raw(&block.header, 1);
let body = Rlp::new(&block.body.as_ref().unwrap()); // incomplete blocks are filtered out in the loop above
block_rlp.append_raw(body.at(0).as_raw(), 1);
block_rlp.append_raw(body.at(1).as_raw(), 1);
drained.push(block_rlp.out());
}
}
for h in hashes {
self.blocks.remove(&h);
}
trace!("Drained {} blocks, new head :{:?}", drained.len(), self.head);
drained
}
/// Check if the collection is empty. We consider the syncing round complete once
/// there is no block data left and at most one head pointer remains.
pub fn is_empty(&self) -> bool {
return self.heads.len() == 0 ||
(self.heads.len() == 1 && self.head.map_or(false, |h| h == self.heads[0]))
}
/// Check if the collection contains a given block header.
pub fn contains(&self, hash: &H256) -> bool {
self.blocks.contains_key(hash)
}
/// Return heap size.
pub fn heap_size(&self) -> usize {
//TODO: other collections
self.blocks.heap_size_of_children()
}
/// Check if given block hash is marked as being downloaded.
pub fn is_downloading(&self, hash: &H256) -> bool {
self.downloading_headers.contains(hash) || self.downloading_bodies.contains(hash)
}
fn insert_body(&mut self, b: Bytes) -> Result<(), UtilError> {
let body = UntrustedRlp::new(&b);
let tx = try!(body.at(0));
let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here
let uncles = try!(body.at(1)).as_raw().sha3();
let header_id = HeaderId {
transactions_root: tx_root,
uncles: uncles
};
match self.header_ids.get(&header_id).cloned() {
Some(h) => {
self.header_ids.remove(&header_id);
self.downloading_bodies.remove(&h);
match self.blocks.get_mut(&h) {
Some(ref mut block) => {
trace!(target: "sync", "Got body {}", h);
block.body = Some(body.as_raw().to_vec());
},
None => warn!("Got body with no header {}", h)
}
}
None => trace!(target: "sync", "Ignored unknown/stale block body")
};
Ok(())
}
fn insert_header(&mut self, header: Bytes) -> Result<H256, UtilError> {
let info: BlockHeader = try!(UntrustedRlp::new(&header).as_val());
let hash = info.hash();
if self.blocks.contains_key(&hash) {
return Ok(hash);
}
match self.head {
None if hash == self.heads[0] => {
trace!("New head {}", hash);
self.head = Some(info.parent_hash);
},
_ => ()
}
let mut block = SyncBlock {
header: header,
body: None,
};
let header_id = HeaderId {
transactions_root: info.transactions_root,
uncles: info.uncles_hash
};
if header_id.transactions_root == rlp::SHA3_NULL_RLP && header_id.uncles == rlp::SHA3_EMPTY_LIST_RLP {
// empty body, just mark as downloaded
let mut body_stream = RlpStream::new_list(2);
body_stream.append_raw(&rlp::NULL_RLP, 1);
body_stream.append_raw(&rlp::EMPTY_LIST_RLP, 1);
block.body = Some(body_stream.out());
}
else {
self.header_ids.insert(header_id, hash.clone());
}
self.parents.insert(info.parent_hash.clone(), hash.clone());
self.blocks.insert(hash.clone(), block);
Ok(hash)
}
// update subchain headers
fn update_heads(&mut self) {
let mut new_heads = Vec::new();
let old_subchains: HashSet<_> = { self.heads.iter().map(Clone::clone).collect() };
for s in self.heads.drain(..) {
let mut h = s.clone();
loop {
match self.parents.get(&h) {
Some(next) => {
h = next.clone();
if old_subchains.contains(&h) {
trace!("Completed subchain {:?}", s);
break; // reached head of the other subchain, merge by not adding
}
},
_ => {
new_heads.push(h);
break;
}
}
}
}
self.heads = new_heads;
}
}
#[cfg(test)]
mod test {
use super::BlockCollection;
use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockId, BlockChainClient};
use ethcore::views::HeaderView;
use ethcore::header::BlockNumber;
use util::*;
fn is_empty(bc: &BlockCollection) -> bool {
bc.heads.is_empty() &&
bc.blocks.is_empty() &&
bc.parents.is_empty() &&
bc.header_ids.is_empty() &&
bc.head.is_none() &&
bc.downloading_headers.is_empty() &&
bc.downloading_bodies.is_empty()
}
#[test]
fn create_clear() {
let mut bc = BlockCollection::new();
assert!(is_empty(&bc));
let client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Nothing);
let hashes = (0 .. 100).map(|i| (&client as &BlockChainClient).block_hash(BlockId::Number(i)).unwrap()).collect();
bc.reset_to(hashes);
assert!(!is_empty(&bc));
bc.clear();
assert!(is_empty(&bc));
}
#[test]
fn insert_headers() {
let mut bc = BlockCollection::new();
assert!(is_empty(&bc));
let client = TestBlockChainClient::new();
let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0 .. nblocks).map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap()).collect();
let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect();
let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect();
let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect();
bc.reset_to(heads);
assert!(!bc.is_empty());
assert_eq!(hashes[0], bc.heads[0]);
assert!(bc.needed_bodies(1, false).is_empty());
assert!(!bc.contains(&hashes[0]));
assert!(!bc.is_downloading(&hashes[0]));
let (h, n) = bc.needed_headers(6, false).unwrap();
assert!(bc.is_downloading(&hashes[0]));
assert_eq!(hashes[0], h);
assert_eq!(n, 6);
assert_eq!(bc.downloading_headers.len(), 1);
assert!(bc.drain().is_empty());
bc.insert_headers(headers[0..6].to_vec());
assert_eq!(hashes[5], bc.heads[0]);
for h in &hashes[0..6] {
bc.clear_header_download(h)
}
assert_eq!(bc.downloading_headers.len(), 0);
assert!(!bc.is_downloading(&hashes[0]));
assert!(bc.contains(&hashes[0]));
assert_eq!(&bc.drain()[..], &blocks[0..6]);
assert!(!bc.contains(&hashes[0]));
assert_eq!(hashes[5], bc.head.unwrap());
let (h, _) = bc.needed_headers(6, false).unwrap();
assert_eq!(hashes[5], h);
let (h, _) = bc.needed_headers(6, false).unwrap();
assert_eq!(hashes[20], h);
bc.insert_headers(headers[10..16].to_vec());
assert!(bc.drain().is_empty());
bc.insert_headers(headers[5..10].to_vec());
assert_eq!(&bc.drain()[..], &blocks[6..16]);
assert_eq!(hashes[15], bc.heads[0]);
bc.insert_headers(headers[16..].to_vec());
bc.drain();
assert!(bc.is_empty());
}
#[test]
fn insert_headers_with_gap() {
let mut bc = BlockCollection::new();
assert!(is_empty(&bc));
let client = TestBlockChainClient::new();
let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0 .. nblocks).map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap()).collect();
let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect();
let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect();
let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect();
bc.reset_to(heads);
bc.insert_headers(headers[2..22].to_vec());
assert_eq!(hashes[0], bc.heads[0]);
assert_eq!(hashes[21], bc.heads[1]);
assert!(bc.head.is_none());
bc.insert_headers(headers[0..2].to_vec());
assert!(bc.head.is_some());
assert_eq!(hashes[21], bc.heads[0]);
}
}
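// A minimal sketch of one sync round driven through the public API above; it
// belongs conceptually in this module (so `util::*` and `BlockCollection` are
// in scope), the header/body byte vectors stand in for data received from
// peers, and the request counts are arbitrary.
fn sync_round(subchain_heads: Vec<H256>, header_rlp: Vec<Bytes>, body_rlp: Vec<Bytes>) -> Vec<Bytes> {
    let mut bc = BlockCollection::new();
    bc.reset_to(subchain_heads);                     // start a new round from the subchain heads
    if let Some((start, count)) = bc.needed_headers(256, false) {
        // ...ask a peer for `count` headers beginning at `start`...
        let _ = (start, count);
    }
    bc.insert_headers(header_rlp);                   // connect downloaded headers to the heads
    let _wanted = bc.needed_bodies(128, false);      // hashes whose bodies still need fetching
    bc.insert_bodies(body_rlp);                      // match bodies back to their headers
    bc.drain()                                       // complete blocks, ready for import
}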

View File

@ -791,8 +791,8 @@ impl ChainSync {
self.downloading_hashes.remove(&hash);
}
for b in &peer.asking_blocks {
self.downloading_headers.remove(&b);
self.downloading_bodies.remove(&b);
self.downloading_headers.remove(b);
self.downloading_bodies.remove(b);
}
peer.asking_blocks.clear();
}
@ -1255,7 +1255,7 @@ impl ChainSync {
self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone();
self.peers.get_mut(&peer_id).unwrap().latest_number = Some(chain_info.best_block_number);
sent = sent + 1;
sent += 1;
}
sent
}
@ -1271,7 +1271,7 @@ impl ChainSync {
// If we think the peer is too far behind, just send a single latest hash
peer_best = last_parent.clone();
}
sent = sent + match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &chain_info.best_block_hash) {
sent += match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &chain_info.best_block_hash) {
Some(rlp) => {
{
let peer = self.peers.get_mut(&peer_id).unwrap();
@ -1668,7 +1668,7 @@ mod tests {
sync.propagate_new_hashes(&chain_info, &mut io);
let data = &io.queue[0].data.clone();
let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(&data));
let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(data));
assert!(result.is_ok());
}
@ -1686,7 +1686,7 @@ mod tests {
sync.propagate_blocks(&chain_info, &mut io);
let data = &io.queue[0].data.clone();
let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(&data));
let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(data));
assert!(result.is_ok());
}

View File

@ -70,7 +70,7 @@ impl<'c, K:'c, V:'c> Iterator for RangeIterator<'c, K, V> where K: Add<Output =
}
match self.collection.get(self.range) {
Some(&(ref k, ref vec)) => {
Some((*k, &vec))
Some((*k, vec))
},
None => None
}

View File

@ -16,7 +16,6 @@
use util::*;
use ethcore::client::{BlockChainClient, BlockId, EachBlockWith};
use io::SyncIo;
use chain::{SyncState};
use super::helpers::*;

View File

@ -148,7 +148,7 @@ impl TestNet {
let mut total_steps = 0;
while !self.done() {
self.sync_step();
total_steps = total_steps + 1;
total_steps += 1;
}
total_steps
}

View File

@ -28,7 +28,7 @@ crossbeam = "0.2"
slab = "0.1"
sha3 = { path = "sha3" }
serde = "0.7.0"
clippy = { version = "0.0.64", optional = true}
clippy = { version = "0.0.67", optional = true}
json-tests = { path = "json-tests" }
igd = "0.4.2"
ethcore-devtools = { path = "../devtools" }

View File

@ -29,6 +29,10 @@
#![cfg_attr(feature="dev", allow(clone_on_copy))]
// In most cases it expresses function flow better
#![cfg_attr(feature="dev", allow(if_not_else))]
// TODO [todr] a lot of warnings to be fixed
#![cfg_attr(feature="dev", allow(needless_borrow))]
#![cfg_attr(feature="dev", allow(assign_op_pattern))]
//! Ethcore-util library
//!

View File

@ -17,9 +17,10 @@ ethcore-rpc = { path = "../rpc" }
ethcore-util = { path = "../util" }
parity-webapp = { git = "https://github.com/ethcore/parity-webapp.git" }
# List of apps
parity-status = { git = "https://github.com/ethcore/parity-status.git", version = "0.3.7" }
parity-wallet = { git = "https://github.com/ethcore/parity-wallet.git", version = "0.2.0", optional = true }
clippy = { version = "0.0.64", optional = true}
parity-status = { git = "https://github.com/ethcore/parity-status.git", version = "0.4.1" }
parity-idmanager = { git = "https://github.com/ethcore/parity-idmanager-rs.git", version = "0.1.3" }
parity-wallet = { git = "https://github.com/ethcore/parity-wallet.git", version = "0.3.0", optional = true }
clippy = { version = "0.0.67", optional = true}
[features]
default = ["parity-wallet"]

View File

@ -14,19 +14,29 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use endpoint::Endpoints;
use endpoint::{Endpoints, Endpoint};
use page::PageEndpoint;
use proxypac::ProxyPac;
use parity_webapp::WebApp;
extern crate parity_status;
extern crate parity_idmanager;
#[cfg(feature = "parity-wallet")]
extern crate parity_wallet;
pub const DAPPS_DOMAIN : &'static str = ".parity";
pub const RPC_PATH : &'static str = "rpc";
pub const API_PATH : &'static str = "api";
pub const UTILS_PATH : &'static str = "parity-utils";
pub fn main_page() -> &'static str {
"/status/"
}
pub fn utils() -> Box<Endpoint> {
Box::new(PageEndpoint::with_prefix(parity_idmanager::App::default(), UTILS_PATH.to_owned()))
}
pub fn all_endpoints() -> Endpoints {
let mut pages = Endpoints::new();
pages.insert("proxy".to_owned(), ProxyPac::boxed());

View File

@ -61,6 +61,7 @@ mod proxypac;
use std::sync::{Arc, Mutex};
use std::net::SocketAddr;
use std::collections::HashMap;
use jsonrpc_core::{IoHandler, IoDelegate};
use router::auth::{Authorization, NoAuth, HttpBasicAuth};
@ -106,17 +107,21 @@ pub struct Server {
impl Server {
fn start_http<A: Authorization + 'static>(addr: &SocketAddr, authorization: A, handler: Arc<IoHandler>) -> Result<Server, ServerError> {
let panic_handler = Arc::new(Mutex::new(None));
let endpoints = Arc::new(apps::all_endpoints());
let authorization = Arc::new(authorization);
let rpc_endpoint = Arc::new(rpc::rpc(handler, panic_handler.clone()));
let api = Arc::new(api::RestApi::new(endpoints.clone()));
let endpoints = Arc::new(apps::all_endpoints());
let special = Arc::new({
let mut special = HashMap::new();
special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone()));
special.insert(router::SpecialEndpoint::Api, api::RestApi::new(endpoints.clone()));
special.insert(router::SpecialEndpoint::Utils, apps::utils());
special
});
try!(hyper::Server::http(addr))
.handle(move |_| router::Router::new(
apps::main_page(),
endpoints.clone(),
rpc_endpoint.clone(),
api.clone(),
special.clone(),
authorization.clone(),
))
.map(|l| Server {

View File

@ -26,13 +26,23 @@ use endpoint::{Endpoint, EndpointPath};
use parity_webapp::WebApp;
pub struct PageEndpoint<T : WebApp + 'static> {
/// Content of the files
pub app: Arc<T>,
/// Prefix to strip from the path (when `None`, it is deduced from `app_id`)
pub prefix: Option<String>,
}
impl<T: WebApp + 'static> PageEndpoint<T> {
pub fn new(app: T) -> Self {
PageEndpoint {
app: Arc::new(app)
app: Arc::new(app),
prefix: None,
}
}
pub fn with_prefix(app: T, prefix: String) -> Self {
PageEndpoint {
app: Arc::new(app),
prefix: Some(prefix),
}
}
}
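// A small usage sketch for the new constructor, mirroring how apps.rs (earlier
// in this commit) mounts the shared utils bundle: `new` serves an app under its
// own id, while `with_prefix` strips a fixed prefix instead. `SomeApp` is a
// placeholder for any bundled WebApp; `parity_idmanager::App` and `UTILS_PATH`
// are taken from apps.rs.
let per_app = PageEndpoint::new(SomeApp::default());
let shared = PageEndpoint::with_prefix(parity_idmanager::App::default(), UTILS_PATH.to_owned());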
@ -41,6 +51,7 @@ impl<T: WebApp> Endpoint for PageEndpoint<T> {
fn to_handler(&self, path: EndpointPath) -> Box<server::Handler<HttpStream>> {
Box::new(PageHandler {
app: self.app.clone(),
prefix: self.prefix.clone(),
path: path,
file: None,
write_pos: 0,
@ -50,6 +61,7 @@ impl<T: WebApp> Endpoint for PageEndpoint<T> {
struct PageHandler<T: WebApp + 'static> {
app: Arc<T>,
prefix: Option<String>,
path: EndpointPath,
file: Option<String>,
write_pos: usize,
@ -57,7 +69,8 @@ struct PageHandler<T: WebApp + 'static> {
impl<T: WebApp + 'static> PageHandler<T> {
fn extract_path(&self, path: &str) -> String {
let prefix = "/".to_owned() + &self.path.app_id;
let app_id = &self.path.app_id;
let prefix = "/".to_owned() + self.prefix.as_ref().unwrap_or(app_id);
let prefix_with_slash = prefix.clone() + "/";
// Index file support

View File

@ -17,7 +17,7 @@
//! Serving ProxyPac file
use endpoint::{Endpoint, Handler, ContentHandler, EndpointPath};
use DAPPS_DOMAIN;
use apps::DAPPS_DOMAIN;
pub struct ProxyPac;

View File

@ -23,28 +23,31 @@ pub mod auth;
use DAPPS_DOMAIN;
use std::sync::Arc;
use std::collections::HashMap;
use url::Host;
use hyper;
use hyper::{server, uri, header};
use hyper::{Next, Encoder, Decoder};
use hyper::net::HttpStream;
use apps;
use endpoint::{Endpoint, Endpoints, EndpointPath};
use self::url::Url;
use self::auth::{Authorization, Authorized};
use self::redirect::Redirection;
#[derive(Debug, PartialEq)]
enum SpecialEndpoint {
/// Special endpoints are accessible on every domain (every dapp)
#[derive(Debug, PartialEq, Hash, Eq)]
pub enum SpecialEndpoint {
Rpc,
Api,
None
Utils,
None,
}
pub struct Router<A: Authorization + 'static> {
main_page: &'static str,
endpoints: Arc<Endpoints>,
rpc: Arc<Box<Endpoint>>,
api: Arc<Box<Endpoint>>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>,
handler: Box<server::Handler<HttpStream>>,
}
@ -63,13 +66,9 @@ impl<A: Authorization + 'static> server::Handler<HttpStream> for Router<A> {
let endpoint = extract_endpoint(&url);
match endpoint {
// First check RPC requests
(ref path, SpecialEndpoint::Rpc) if *req.method() != hyper::method::Method::Get => {
self.rpc.to_handler(path.clone().unwrap_or_default())
},
// Check API requests
(ref path, SpecialEndpoint::Api) => {
self.api.to_handler(path.clone().unwrap_or_default())
// First check special endpoints
(ref path, ref endpoint) if self.special.contains_key(endpoint) => {
self.special.get(endpoint).unwrap().to_handler(path.clone().unwrap_or_default())
},
// Then delegate to dapp
(Some(ref path), _) if self.endpoints.contains_key(&path.app_id) => {
@ -81,7 +80,7 @@ impl<A: Authorization + 'static> server::Handler<HttpStream> for Router<A> {
},
// RPC by default
_ => {
self.rpc.to_handler(EndpointPath::default())
self.special.get(&SpecialEndpoint::Rpc).unwrap().to_handler(EndpointPath::default())
}
}
}
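// Dispatch order after this refactor, roughly: a path or subdomain naming a
// special endpoint (rpc, api, parity-utils) is served straight from the
// `special` map, then dapps registered in `endpoints`, with everything else
// ultimately falling back to the map's Rpc entry. Adding a new special
// endpoint is now mostly a matter of adding an enum variant here and inserting
// another entry into the map built in server.rs.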
@ -111,16 +110,14 @@ impl<A: Authorization> Router<A> {
pub fn new(
main_page: &'static str,
endpoints: Arc<Endpoints>,
rpc: Arc<Box<Endpoint>>,
api: Arc<Box<Endpoint>>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>) -> Self {
let handler = rpc.to_handler(EndpointPath::default());
let handler = special.get(&SpecialEndpoint::Rpc).unwrap().to_handler(EndpointPath::default());
Router {
main_page: main_page,
endpoints: endpoints,
rpc: rpc,
api: api,
special: special,
authorization: authorization,
handler: handler,
}
@ -158,9 +155,11 @@ fn extract_endpoint(url: &Option<Url>) -> (Option<EndpointPath>, SpecialEndpoint
if url.path.len() <= 1 {
return SpecialEndpoint::None;
}
match url.path[0].as_ref() {
"rpc" => SpecialEndpoint::Rpc,
"api" => SpecialEndpoint::Api,
apps::RPC_PATH => SpecialEndpoint::Rpc,
apps::API_PATH => SpecialEndpoint::Api,
apps::UTILS_PATH => SpecialEndpoint::Utils,
_ => SpecialEndpoint::None,
}
}
@ -215,6 +214,15 @@ fn should_extract_endpoint() {
}), SpecialEndpoint::Rpc)
);
assert_eq!(
extract_endpoint(&Url::parse("http://my.status.dapp/parity-utils/inject.js").ok()),
(Some(EndpointPath {
app_id: "my.status".to_owned(),
host: "my.status.dapp".to_owned(),
port: 80,
}), SpecialEndpoint::Utils)
);
// By Subdomain
assert_eq!(
extract_endpoint(&Url::parse("http://my.status.dapp/test.html").ok()),
@ -237,7 +245,7 @@ fn should_extract_endpoint() {
// API by subdomain
assert_eq!(
extract_endpoint(&Url::parse("http://my.status.dapp/rpc/").ok()),
extract_endpoint(&Url::parse("http://my.status.dapp/api/").ok()),
(Some(EndpointPath {
app_id: "my.status".to_owned(),
host: "my.status.dapp".to_owned(),

View File

@ -16,7 +16,6 @@
//! HTTP Redirection hyper handler
use std::io::Write;
use hyper::{header, server, Decoder, Encoder, Next};
use hyper::net::HttpStream;
use hyper::status::StatusCode;