Merge branch 'master' into miner-no-default

Conflicts:
	sync/src/lib.rs
Tomasz Drwięga
2016-06-23 21:04:23 +02:00
160 changed files with 29723 additions and 918 deletions

View File

@@ -22,7 +22,7 @@ ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" }
num_cpus = "0.2"
clippy = { version = "0.0.76", optional = true}
clippy = { version = "0.0.77", optional = true}
crossbeam = "0.2.9"
lazy_static = "0.2"
ethcore-devtools = { path = "../devtools" }

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30"
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": false
}
}
},

File diff suppressed because it is too large

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30"
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": false
}
}
},

View File

@@ -0,0 +1,43 @@
{
"name": "Frontier (Test)",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": true
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x1"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x1388"
},
"accounts": {
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
}
}

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
"frontierCompatibilityModeLimit": "0xffffffffffffffff",
"daoRescueSoftFork": false
}
}
},

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": 0
"frontierCompatibilityModeLimit": 0,
"daoRescueSoftFork": false
}
}
},

View File

@@ -0,0 +1,43 @@
{
"name": "Homestead (Test)",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": 0,
"daoRescueSoftFork": true
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x1"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x1388"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
}
}

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar": "",
"frontierCompatibilityModeLimit": "0x789b0"
"frontierCompatibilityModeLimit": "0x789b0",
"daoRescueSoftFork": false
}
}
},

View File

@@ -9,7 +9,8 @@
"durationLimit": "0x08",
"blockReward": "0x14D1120D7B160000",
"registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
"frontierCompatibilityModeLimit": "0xffffffffffffffff",
"daoRescueSoftFork": false
}
}
},

View File

@@ -166,16 +166,16 @@ impl Account {
!self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == Some(SHA3_EMPTY))
}
/// Provide a database to lookup `code_hash`. Should not be called if it is a contract without code.
/// Provide a database to get `code_hash`. Should not be called if it is a contract without code.
pub fn cache_code(&mut self, db: &AccountDB) -> bool {
// TODO: fill out self.code_cache;
trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty());
self.is_cached() ||
match self.code_hash {
Some(ref h) => match db.lookup(h) {
Some(ref h) => match db.get(h) {
Some(x) => { self.code_cache = x.to_vec(); true },
_ => {
warn!("Failed reverse lookup of {}", h);
warn!("Failed reverse get of {}", h);
false
},
},

View File

@@ -30,18 +30,18 @@ impl<'db> HashDB for AccountDB<'db>{
unimplemented!()
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
fn get(&self, key: &H256) -> Option<&[u8]> {
if key == &SHA3_NULL_RLP {
return Some(&NULL_RLP_STATIC);
}
self.db.lookup(&combine_key(&self.address, key))
self.db.get(&combine_key(&self.address, key))
}
fn exists(&self, key: &H256) -> bool {
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
self.db.exists(&combine_key(&self.address, key))
self.db.contains(&combine_key(&self.address, key))
}
fn insert(&mut self, _value: &[u8]) -> H256 {
@@ -52,7 +52,7 @@ impl<'db> HashDB for AccountDB<'db>{
unimplemented!()
}
fn kill(&mut self, _key: &H256) {
fn remove(&mut self, _key: &H256) {
unimplemented!()
}
}
@@ -82,18 +82,18 @@ impl<'db> HashDB for AccountDBMut<'db>{
unimplemented!()
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
fn get(&self, key: &H256) -> Option<&[u8]> {
if key == &SHA3_NULL_RLP {
return Some(&NULL_RLP_STATIC);
}
self.db.lookup(&combine_key(&self.address, key))
self.db.get(&combine_key(&self.address, key))
}
fn exists(&self, key: &H256) -> bool {
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
self.db.exists(&combine_key(&self.address, key))
self.db.contains(&combine_key(&self.address, key))
}
fn insert(&mut self, value: &[u8]) -> H256 {
@@ -114,12 +114,12 @@ impl<'db> HashDB for AccountDBMut<'db>{
self.db.emplace(key, value.to_vec())
}
fn kill(&mut self, key: &H256) {
fn remove(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
let key = combine_key(&self.address, key);
self.db.kill(&key)
self.db.remove(&key)
}
}

View File

@@ -205,6 +205,13 @@ impl AccountProvider {
self.unlock_account(account, password, Unlock::Temp)
}
/// Checks if given account is unlocked
pub fn is_unlocked<A>(&self, account: A) -> bool where Address: From<A> {
let account = Address::from(account).into();
let unlocked = self.unlocked.read().unwrap();
unlocked.get(&account).is_some()
}
/// Signs the message. Account must be unlocked.
pub fn sign<A, M>(&self, account: A, message: M) -> Result<H520, Error> where Address: From<A>, Message: From<M> {
let account = Address::from(account).into();
@@ -236,7 +243,6 @@ impl AccountProvider {
#[cfg(test)]
mod tests {
use super::AccountProvider;
use ethstore::SecretStore;
use ethstore::ethkey::{Generator, Random};
#[test]

View File

@@ -203,8 +203,9 @@ mod tests {
timestamp: 0,
difficulty: 0.into(),
last_hashes: vec![],
dao_rescue_block_gas_limit: None,
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0);
@@ -253,7 +254,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, addr, 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, addr, 3141562.into(), vec![]).unwrap();
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
assert!(b.try_seal(engine.deref(), seal).is_ok());

View File

@@ -177,6 +177,7 @@ pub struct OpenBlock<'x> {
engine: &'x Engine,
vm_factory: &'x EvmFactory,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
}
/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
@@ -188,6 +189,7 @@ pub struct ClosedBlock {
block: ExecutedBlock,
uncle_bytes: Bytes,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
unclosed_state: State,
}
@@ -198,7 +200,6 @@ pub struct ClosedBlock {
pub struct LockedBlock {
block: ExecutedBlock,
uncle_bytes: Bytes,
last_hashes: LastHashes,
}
/// A block that has a valid seal.
@@ -219,9 +220,10 @@ impl<'x> OpenBlock<'x> {
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
author: Address,
gas_floor_target: U256,
extra_data: Bytes
extra_data: Bytes,
) -> Result<Self, Error> {
let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()));
let mut r = OpenBlock {
@@ -229,6 +231,7 @@ impl<'x> OpenBlock<'x> {
engine: engine,
vm_factory: vm_factory,
last_hashes: last_hashes,
dao_rescue_block_gas_limit: dao_rescue_block_gas_limit,
};
r.block.base.header.parent_hash = parent.hash();
@@ -285,6 +288,7 @@ impl<'x> OpenBlock<'x> {
/// Get the environment info concerning this block.
pub fn env_info(&self) -> EnvInfo {
// TODO: memoise.
const SOFT_FORK_BLOCK: u64 = 1775000;
EnvInfo {
number: self.block.base.header.number,
author: self.block.base.header.author.clone(),
@@ -293,6 +297,7 @@ impl<'x> OpenBlock<'x> {
last_hashes: self.last_hashes.clone(), // TODO: should be a reference.
gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
gas_limit: self.block.base.header.gas_limit.clone(),
dao_rescue_block_gas_limit: if self.block.base.header.number == SOFT_FORK_BLOCK { Some(self.block.base.header.gas_limit) } else { self.dao_rescue_block_gas_limit },
}
}
@@ -339,6 +344,7 @@ impl<'x> OpenBlock<'x> {
block: s.block,
uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes,
dao_rescue_block_gas_limit: s.dao_rescue_block_gas_limit,
unclosed_state: unclosed_state,
}
}
@@ -360,7 +366,6 @@ impl<'x> OpenBlock<'x> {
LockedBlock {
block: s.block,
uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes,
}
}
}
@@ -386,7 +391,6 @@ impl ClosedBlock {
LockedBlock {
block: self.block,
uncle_bytes: self.uncle_bytes,
last_hashes: self.last_hashes,
}
}
@@ -400,6 +404,7 @@ impl ClosedBlock {
engine: engine,
vm_factory: vm_factory,
last_hashes: self.last_hashes,
dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit,
}
}
}
@@ -456,7 +461,18 @@ impl IsBlock for SealedBlock {
/// Enact the block given by block header, transactions and uncles
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
pub fn enact(
header: &Header,
transactions: &[SignedTransaction],
uncles: &[Header],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
{
if ::log::max_log_level() >= ::log::LogLevel::Trace {
let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce()));
@@ -464,7 +480,7 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
}
let mut b = try!(OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, header.author().clone(), 3141562.into(), header.extra_data().clone()));
let mut b = try!(OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), 3141562.into(), header.extra_data().clone()));
b.set_difficulty(*header.difficulty());
b.set_gas_limit(*header.gas_limit());
b.set_timestamp(header.timestamp());
@@ -474,22 +490,52 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact_bytes(
block_bytes: &[u8],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
let block = BlockView::new(block_bytes);
let header = block.header();
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact_verified(
block: &PreverifiedBlock,
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
let view = BlockView::new(&block.bytes);
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<SealedBlock, Error> {
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact_and_seal(
block_bytes: &[u8],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<SealedBlock, Error> {
let header = BlockView::new(block_bytes).header_view();
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory)).seal(engine, header.seal())))
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)).seal(engine, header.seal())))
}
#[cfg(test)]
@@ -509,7 +555,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = b.close_and_lock();
let _ = b.seal(engine.deref(), vec![]);
}
@@ -525,7 +571,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]).unwrap()
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), 3141562.into(), vec![]).unwrap()
.close_and_lock().seal(engine.deref(), vec![]).unwrap();
let orig_bytes = b.rlp_bytes();
let orig_db = b.drain();
@@ -533,7 +579,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
assert_eq!(e.rlp_bytes(), orig_bytes);
@@ -553,7 +599,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]).unwrap();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut uncle1_header = Header::new();
uncle1_header.extra_data = b"uncle1".to_vec();
let mut uncle2_header = Header::new();
@@ -568,7 +614,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
let bytes = e.rlp_bytes();
assert_eq!(bytes, orig_bytes);

View File

@@ -130,7 +130,9 @@ impl QueueSignal {
}
if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
if let Err(e) = self.message_channel.send(UserMessage(SyncMessage::BlockVerified)) {
debug!("Error sending BlockVerified message: {:?}", e);
}
}
}

View File

@@ -253,12 +253,22 @@ impl BlockChain {
// open extras db
let mut extras_path = path.to_path_buf();
extras_path.push("extras");
let extras_db = Database::open_default(extras_path.to_str().unwrap()).unwrap();
let extras_db = match config.db_cache_size {
None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
extras_path.to_str().unwrap()).unwrap(),
};
// open blocks db
let mut blocks_path = path.to_path_buf();
blocks_path.push("blocks");
let blocks_db = Database::open_default(blocks_path.to_str().unwrap()).unwrap();
let blocks_db = match config.db_cache_size {
None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
blocks_path.to_str().unwrap()).unwrap(),
};
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));

View File

@@ -23,6 +23,8 @@ pub struct Config {
pub pref_cache_size: usize,
/// Maximum cache size in bytes.
pub max_cache_size: usize,
/// Backing db cache_size
pub db_cache_size: Option<usize>,
}
impl Default for Config {
@@ -30,6 +32,7 @@ impl Default for Config {
Config {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
db_cache_size: None,
}
}
}

View File

@@ -18,6 +18,7 @@
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use util::*;
use util::panics::*;
use views::BlockView;
@@ -50,6 +51,8 @@ pub use types::block_status::BlockStatus;
use evm::Factory as EvmFactory;
use miner::{Miner, MinerService, TransactionImportResult, AccountDetails};
const MAX_TX_QUEUE_SIZE: usize = 4096;
impl fmt::Display for BlockChainInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
@@ -92,6 +95,8 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
verifier: PhantomData<V>,
vm_factory: Arc<EvmFactory>,
miner: Arc<Miner>,
io_channel: IoChannel<NetSyncMessage>,
queue_transactions: AtomicUsize,
}
const HISTORY: u64 = 1200;
@@ -141,7 +146,10 @@ impl<V> Client<V> where V: Verifier {
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
let mut state_db = journaldb::new(&append_path(&path, "state"), config.pruning);
let mut state_db = journaldb::new(
&append_path(&path, "state"),
config.pruning,
config.db_cache_size);
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
@@ -149,7 +157,7 @@ impl<V> Client<V> where V: Verifier {
let engine = Arc::new(spec.engine);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
let panic_handler = PanicHandler::new_in_arc();
panic_handler.forward_from(&block_queue);
@@ -165,6 +173,8 @@ impl<V> Client<V> where V: Verifier {
verifier: PhantomData,
vm_factory: Arc::new(EvmFactory::new(config.vm_type)),
miner: miner,
io_channel: message_channel,
queue_transactions: AtomicUsize::new(0),
};
Ok(Arc::new(client))
@@ -220,7 +230,7 @@ impl<V> Client<V> where V: Verifier {
let last_hashes = self.build_last_hashes(header.parent_hash.clone());
let db = self.state_db.lock().unwrap().boxed_clone();
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory);
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory);
if let Err(e) = enact_result {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
@@ -271,6 +281,7 @@ impl<V> Client<V> where V: Verifier {
let mut import_results = Vec::with_capacity(max_blocks_to_import);
let _import_lock = self.import_lock.lock();
let _timer = PerfTimer::new("import_verified_blocks");
let blocks = self.block_queue.drain(max_blocks_to_import);
let original_best = self.chain_info().best_block_hash;
@@ -361,6 +372,19 @@ impl<V> Client<V> where V: Verifier {
imported
}
/// Import transactions from the IO queue
pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize {
let _timer = PerfTimer::new("import_queued_transactions");
self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
let fetch_account = |a: &Address| AccountDetails {
nonce: self.latest_nonce(a),
balance: self.latest_balance(a),
};
let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
let results = self.miner.import_transactions(self, tx, fetch_account);
results.len()
}
/// Attempt to get a copy of a specific block's state.
///
/// This will not fail if given BlockID::Latest.
@@ -462,6 +486,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(view.parent_hash()),
};
// that's just a copy of the state.
let mut state = self.state();
@@ -747,7 +772,23 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
nonce: self.latest_nonce(a),
balance: self.latest_balance(a),
};
self.miner.import_transactions(transactions, fetch_account)
self.miner.import_transactions(self, transactions, fetch_account)
}
fn queue_transactions(&self, transactions: Vec<Bytes>) {
if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE {
debug!("Ignoring {} transactions: queue is full", transactions.len());
} else {
let len = transactions.len();
match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) {
Ok(_) => {
self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst);
}
Err(e) => {
debug!("Ignoring {} transactions: error queueing: {}", len, e);
}
}
}
}
fn all_transactions(&self) -> Vec<SignedTransaction> {
@@ -767,6 +808,7 @@ impl<V> MiningBlockChainClient for Client<V> where V: Verifier {
self.state_db.lock().unwrap().boxed_clone(),
&self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
self.build_last_hashes(h.clone()),
self.dao_rescue_block_gas_limit(h.clone()),
author,
gas_floor_target,
extra_data,

View File

@@ -35,4 +35,6 @@ pub struct ClientConfig {
pub pruning: journaldb::Algorithm,
/// The name of the client instance.
pub name: String,
/// State db cache-size if not default
pub db_cache_size: Option<usize>,
}

View File

@@ -42,7 +42,7 @@ use header::{BlockNumber, Header};
use transaction::{LocalizedTransaction, SignedTransaction};
use log_entry::LocalizedLogEntry;
use filter::Filter;
use views::BlockView;
use views::{HeaderView, BlockView};
use error::{ImportResult, ExecutionError};
use receipt::LocalizedReceipt;
use trace::LocalizedTrace;
@@ -193,6 +193,9 @@ pub trait BlockChainClient : Sync + Send {
/// import transactions from network/other 3rd party
fn import_transactions(&self, transactions: Vec<SignedTransaction>) -> Vec<Result<TransactionImportResult, EthError>>;
/// Queue transactions for importing.
fn queue_transactions(&self, transactions: Vec<Bytes>);
/// list all transactions
fn all_transactions(&self) -> Vec<SignedTransaction>;
@@ -221,6 +224,28 @@ pub trait BlockChainClient : Sync + Send {
Err(())
}
}
/// Get `Some` gas limit of SOFT_FORK_BLOCK, or `None` if chain is not yet that long.
fn dao_rescue_block_gas_limit(&self, chain_hash: H256) -> Option<U256> {
const SOFT_FORK_BLOCK: u64 = 1775000;
// shortcut if the canon chain is already known.
if self.chain_info().best_block_number > SOFT_FORK_BLOCK + 1000 {
return self.block_header(BlockID::Number(SOFT_FORK_BLOCK)).map(|header| HeaderView::new(&header).gas_limit());
}
// otherwise check according to `chain_hash`.
if let Some(mut header) = self.block_header(BlockID::Hash(chain_hash)) {
if HeaderView::new(&header).number() < SOFT_FORK_BLOCK {
None
} else {
while HeaderView::new(&header).number() != SOFT_FORK_BLOCK {
header = self.block_header(BlockID::Hash(HeaderView::new(&header).parent_hash())).expect("chain is complete; parent of chain entry must be in chain; qed");
}
Some(HeaderView::new(&header).gas_limit())
}
} else {
None
}
}
}
/// Extended client interface used for mining

View File

@@ -188,7 +188,7 @@ impl TestBlockChainClient {
txs.append(&signed_tx);
txs.out()
},
_ => rlp::NULL_RLP.to_vec()
_ => rlp::EMPTY_LIST_RLP.to_vec()
};
let mut rlp = RlpStream::new_list(3);
@@ -491,7 +491,13 @@ impl BlockChainClient for TestBlockChainClient {
balance: balances[a],
};
self.miner.import_transactions(transactions, &fetch_account)
self.miner.import_transactions(self, transactions, &fetch_account)
}
fn queue_transactions(&self, transactions: Vec<Bytes>) {
// import right here
let tx = transactions.into_iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
self.import_transactions(tx);
}
fn all_transactions(&self) -> Vec<SignedTransaction> {

View File

@@ -39,6 +39,9 @@ pub struct EnvInfo {
pub last_hashes: LastHashes,
/// The gas used.
pub gas_used: U256,
/// Block gas limit at DAO rescue block SOFT_FORK_BLOCK or None if not yet there.
pub dao_rescue_block_gas_limit: Option<U256>,
}
impl Default for EnvInfo {
@@ -51,6 +54,7 @@ impl Default for EnvInfo {
gas_limit: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
dao_rescue_block_gas_limit: None,
}
}
}
@@ -66,6 +70,7 @@ impl From<ethjson::vm::Env> for EnvInfo {
timestamp: e.timestamp.into(),
last_hashes: (1..cmp::min(number + 1, 257)).map(|i| format!("{}", number - i).as_bytes().sha3()).collect(),
gas_used: U256::zero(),
dao_rescue_block_gas_limit: None,
}
}
}

View File

@@ -228,7 +228,7 @@ pub enum Error {
/// The value of the nonce or mishash is invalid.
PowInvalid,
/// Error concerning TrieDBs
TrieError(TrieError),
Trie(TrieError),
}
impl fmt::Display for Error {
@@ -244,7 +244,7 @@ impl fmt::Display for Error {
f.write_fmt(format_args!("Unknown engine name ({})", name)),
Error::PowHashInvalid => f.write_str("Invalid or out of date PoW hash."),
Error::PowInvalid => f.write_str("Invalid nonce or mishash"),
Error::TrieError(ref err) => f.write_fmt(format_args!("{}", err)),
Error::Trie(ref err) => f.write_fmt(format_args!("{}", err)),
}
}
}
@@ -308,7 +308,7 @@ impl From<IoError> for Error {
impl From<TrieError> for Error {
fn from(err: TrieError) -> Error {
Error::TrieError(err)
Error::Trie(err)
}
}

View File

@@ -41,6 +41,8 @@ pub struct EthashParams {
pub registrar: Address,
/// Homestead transition block number.
pub frontier_compatibility_mode_limit: u64,
/// Enable the soft-fork logic.
pub dao_rescue_soft_fork: bool,
}
impl From<ethjson::spec::EthashParams> for EthashParams {
@@ -53,6 +55,7 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
block_reward: p.block_reward.into(),
registrar: p.registrar.into(),
frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.into(),
dao_rescue_soft_fork: p.dao_rescue_soft_fork.into(),
}
}
}
@@ -102,8 +105,9 @@ impl Engine for Ethash {
Schedule::new_frontier()
} else {
let mut s = Schedule::new_homestead();
// TODO: make dependent on gaslimit > 4000000 of block 1760000.
s.reject_dao_transactions = env_info.number >= 1760000;
if self.ethash_params.dao_rescue_soft_fork {
s.reject_dao_transactions = env_info.dao_rescue_block_gas_limit.map_or(false, |x| x <= 4_000_000.into());
}
s
}
}
@@ -319,7 +323,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = b.close();
assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
}
@@ -334,7 +338,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut uncle = Header::new();
let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
uncle.author = uncle_author.clone();
@@ -362,7 +366,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
});
assert!(schedule.stack_limit > 0);
@@ -374,7 +379,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
});
assert!(!schedule.have_delegate_call);

View File

@@ -33,7 +33,12 @@ use super::spec::*;
pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) }
/// Create a new Frontier mainnet chain spec.
pub fn new_frontier() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier.json")) }
pub fn new_frontier(dao_rescue: bool) -> Spec {
Spec::load(match dao_rescue {
true => include_bytes!("../../res/ethereum/frontier_dao_rescue.json"),
false => include_bytes!("../../res/ethereum/frontier.json"),
})
}
/// Create a new Frontier chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_test.json")) }
@@ -84,7 +89,7 @@ mod tests {
#[test]
fn frontier() {
let frontier = new_frontier();
let frontier = new_frontier(true);
assert_eq!(frontier.state_root(), H256::from_str("d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544").unwrap());
let genesis = frontier.genesis_block();

View File

@@ -16,9 +16,8 @@
//! Just in time compiler execution environment.
use common::*;
use trace::VMTracer;
use evmjit;
use evm::{self, Error, GasLeft};
use evm::{self, GasLeft};
/// Should be used to convert jit types to ethcore
trait FromJit<T>: Sized {
@@ -303,7 +302,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
#[derive(Default)]
pub struct JitEvm {
ctxt: Option<evmjit::ContextHandle>,
context: Option<evmjit::ContextHandle>,
}
impl evm::Evm for JitEvm {
@@ -347,7 +346,7 @@ impl evm::Evm for JitEvm {
data.timestamp = ext.env_info().timestamp as i64;
self.context = Some(unsafe { evmjit::ContextHandle::new(data, schedule, &mut ext_handle) });
let context = self.context.as_ref_mut().unwrap();
let mut context = self.context.as_mut().unwrap();
let res = context.exec();
match res {

View File

@@ -318,7 +318,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
}
}

View File

@@ -89,34 +89,37 @@ impl Miner {
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn prepare_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "prepare_sealing: entering");
let transactions = self.transaction_queue.lock().unwrap().top_transactions();
let mut sealing_work = self.sealing_work.lock().unwrap();
let best_hash = chain.best_block_header().sha3();
let (transactions, mut open_block) = {
let transactions = {self.transaction_queue.lock().unwrap().top_transactions()};
let mut sealing_work = self.sealing_work.lock().unwrap();
let best_hash = chain.best_block_header().sha3();
/*
// check to see if last ClosedBlock in would_seals is actually same parent block.
// if so
// duplicate, re-open and push any new transactions.
// if at least one was pushed successfully, close and enqueue new ClosedBlock;
// otherwise, leave everything alone.
// otherwise, author a fresh block.
// check to see if last ClosedBlock in would_seals is actually same parent block.
// if so
// duplicate, re-open and push any new transactions.
// if at least one was pushed successfully, close and enqueue new ClosedBlock;
// otherwise, leave everything alone.
// otherwise, author a fresh block.
*/
let mut open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block
let e = self.engine();
old_block.reopen(e, chain.vm_factory())
}
None => {
// block not found - create it.
trace!(target: "miner", "No existing work - making new block");
chain.prepare_open_block(
self.author(),
self.gas_floor_target(),
self.extra_data()
)
}
let open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block
let e = self.engine();
old_block.reopen(e, chain.vm_factory())
}
None => {
// block not found - create it.
trace!(target: "miner", "No existing work - making new block");
chain.prepare_open_block(
self.author(),
self.gas_floor_target(),
self.extra_data()
)
}
};
(transactions, open_block)
};
let mut invalid_transactions = HashSet::new();
@@ -146,14 +149,16 @@ impl Miner {
let block = open_block.close();
let mut queue = self.transaction_queue.lock().unwrap();
let fetch_account = |a: &Address| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
};
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
{
let mut queue = self.transaction_queue.lock().unwrap();
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
}
}
if !block.transactions().is_empty() {
@@ -179,6 +184,8 @@ impl Miner {
trace!(target: "miner", "prepare_sealing: unable to generate seal internally");
}
}
let mut sealing_work = self.sealing_work.lock().unwrap();
if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
sealing_work.push(block);
@@ -250,6 +257,7 @@ impl MinerService for Miner {
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
dao_rescue_block_gas_limit: chain.dao_rescue_block_gas_limit(header.parent_hash().clone()),
};
// that's just a copy of the state.
let mut state = block.state().clone();
@@ -359,13 +367,19 @@ impl MinerService for Miner {
*self.gas_floor_target.read().unwrap()
}
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
Vec<Result<TransactionImportResult, Error>>
where T: Fn(&Address) -> AccountDetails {
let mut transaction_queue = self.transaction_queue.lock().unwrap();
transactions.into_iter()
.map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
.collect()
let results: Vec<Result<TransactionImportResult, Error>> = {
let mut transaction_queue = self.transaction_queue.lock().unwrap();
transactions.into_iter()
.map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
.collect()
};
if !results.is_empty() {
self.update_sealing(chain);
}
results
}
fn import_own_transaction<T>(
@@ -547,7 +561,7 @@ impl MinerService for Miner {
for tx in &txs {
let _sender = tx.sender();
}
let _ = self.import_transactions(txs, |a| AccountDetails {
let _ = self.import_transactions(chain, txs, |a| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
});
@@ -586,6 +600,7 @@ mod tests {
use util::*;
use client::{TestBlockChainClient, EachBlockWith};
use block::*;
use spec::Spec;
// TODO [ToDr] To uncomment when TestBlockChainClient can actually return a ClosedBlock.
#[ignore]

View File

@@ -95,7 +95,7 @@ pub trait MinerService : Send + Sync {
fn set_transactions_limit(&self, limit: usize);
/// Imports transactions to transaction queue.
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
Vec<Result<TransactionImportResult, Error>>
where T: Fn(&Address) -> AccountDetails, Self: Sized;

View File

@@ -167,16 +167,17 @@ impl PartialOrd for TransactionOrder {
impl Ord for TransactionOrder {
fn cmp(&self, b: &TransactionOrder) -> Ordering {
// Local transactions should always have priority
if self.origin != b.origin {
return self.origin.cmp(&b.origin);
}
// First check nonce_height
if self.nonce_height != b.nonce_height {
return self.nonce_height.cmp(&b.nonce_height);
}
// Local transactions should always have priority
// NOTE nonce has to be checked first, cause otherwise the order might be wrong.
if self.origin != b.origin {
return self.origin.cmp(&b.origin);
}
// Then compare gas_prices
let a_gas = self.gas_price;
let b_gas = b.gas_price;
@@ -235,22 +236,22 @@ impl TransactionSet {
self.by_priority.insert(order.clone());
let r = self.by_address.insert(sender, nonce, order);
// If transaction was replaced remove it from priority queue
if let Some(ref order) = r {
self.by_priority.remove(order);
if let Some(ref old_order) = r {
self.by_priority.remove(old_order);
}
assert_eq!(self.by_priority.len(), self.by_address.len());
r
}
/// Remove low priority transactions if there is more then specified by given `limit`.
///
/// It drops transactions from this set but also removes associated `VerifiedTransaction`.
/// Returns addresses and highes nonces of transactions removed because of limit.
/// Returns addresses and lowest nonces of transactions removed because of limit.
fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> Option<HashMap<Address, U256>> {
let len = self.by_priority.len();
if len <= self.limit {
return None;
}
let to_drop : Vec<(Address, U256)> = {
self.by_priority
.iter()
@@ -269,8 +270,8 @@ impl TransactionSet {
by_hash.remove(&order.hash)
.expect("Hash found in `by_priorty` matches the one dropped; so it is included in `by_hash`");
let max = removed.get(&sender).map_or(nonce, |val| cmp::max(*val, nonce));
removed.insert(sender, max);
let min = removed.get(&sender).map_or(nonce, |val| cmp::min(*val, nonce));
removed.insert(sender, min);
removed
}))
}
@@ -279,8 +280,10 @@ impl TransactionSet {
fn drop(&mut self, sender: &Address, nonce: &U256) -> Option<TransactionOrder> {
if let Some(tx_order) = self.by_address.remove(sender, nonce) {
self.by_priority.remove(&tx_order);
assert_eq!(self.by_priority.len(), self.by_address.len());
return Some(tx_order);
}
assert_eq!(self.by_priority.len(), self.by_address.len());
None
}
@@ -468,7 +471,9 @@ impl TransactionQueue {
}));
}
self.import_tx(vtx, client_account.nonce).map_err(Error::Transaction)
let r = self.import_tx(vtx, client_account.nonce).map_err(Error::Transaction);
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
r
}
/// Removes all transactions from particular sender up to (excluding) given client (state) nonce.
@@ -484,6 +489,7 @@ impl TransactionQueue {
// And now lets check if there is some batch of transactions in future
// that should be placed in current. It should also update last_nonces.
self.move_matching_future_to_current(sender, client_nonce, client_nonce);
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
}
/// Removes invalid transaction identified by hash from queue.
@@ -493,6 +499,8 @@ impl TransactionQueue {
/// If gap is introduced marks subsequent transactions as future
pub fn remove_invalid<T>(&mut self, transaction_hash: &H256, fetch_account: &T)
where T: Fn(&Address) -> AccountDetails {
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
let transaction = self.by_hash.remove(transaction_hash);
if transaction.is_none() {
// We don't know this transaction
@@ -511,22 +519,17 @@ impl TransactionQueue {
// And now lets check if there is some chain of transactions in future
// that should be placed in current
self.move_matching_future_to_current(sender, current_nonce, current_nonce);
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
return;
}
// Remove from current
let order = self.current.drop(&sender, &nonce);
if order.is_some() {
// We will either move transaction to future or remove it completely
// so there will be no transactions from this sender in current
self.last_nonces.remove(&sender);
// First update height of transactions in future to avoid collisions
self.update_future(&sender, current_nonce);
// This should move all current transactions to future and remove old transactions
self.move_all_to_future(&sender, current_nonce);
// And now lets check if there is some chain of transactions in future
// that should be placed in current. It should also update last_nonces.
self.move_matching_future_to_current(sender, current_nonce, current_nonce);
// This will keep consistency in queue
// Moves all to future and then promotes a batch from current:
self.remove_all(sender, current_nonce);
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
return;
}
}
@@ -545,7 +548,7 @@ impl TransactionQueue {
} else {
trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
// Remove the transaction completely
self.by_hash.remove(&order.hash);
self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
}
}
}
@@ -565,7 +568,7 @@ impl TransactionQueue {
self.future.insert(*sender, k, order.update_height(k, current_nonce));
} else {
trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
self.by_hash.remove(&order.hash);
self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
}
}
self.future.enforce_limit(&mut self.by_hash);
@@ -664,21 +667,27 @@ impl TransactionQueue {
.cloned()
.map_or(state_nonce, |n| n + U256::one());
// Check height
if nonce > next_nonce {
// We have a gap - put to future
try!(check_too_cheap(Self::replace_transaction(tx, next_nonce, &mut self.future, &mut self.by_hash)));
try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
return Ok(TransactionImportResult::Future);
} else if nonce < state_nonce {
// The transaction might be old, let's check that.
// This has to be the first test, otherwise calculating
// nonce height would result in overflow.
if nonce < state_nonce {
// Droping transaction
trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
return Err(TransactionError::Old);
} else if nonce > next_nonce {
// We have a gap - put to future.
// Update nonces of transactions in future (remove old transactions)
self.update_future(&address, state_nonce);
// Insert transaction (or replace old one with lower gas price)
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
// Return an error if this transaction is not imported because of limit.
try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
return Ok(TransactionImportResult::Future);
}
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
// Keep track of highest nonce stored in current
self.last_nonces.insert(address, nonce);
let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n));
self.last_nonces.insert(address, new_max);
// Update nonces of transactions in future
self.update_future(&address, state_nonce);
// Maybe there are some more items waiting in future?
@@ -687,15 +696,16 @@ impl TransactionQueue {
// same (sender, nonce), but above function would not move it.
if let Some(order) = self.future.drop(&address, &nonce) {
// Let's insert that transaction to current (if it has higher gas_price)
let future_tx = self.by_hash.remove(&order.hash).unwrap();
try!(check_too_cheap(Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash)));
let future_tx = self.by_hash.remove(&order.hash).expect("All transactions in `future` are always in `by_hash`.");
// if the transaction in `current` (the one we are importing) is replaced it means it has too low a gas_price
try!(check_too_cheap(!Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash)));
}
// Also enforce the limit
let removed = self.current.enforce_limit(&mut self.by_hash);
// If some transaction were removed because of limit we need to update last_nonces also.
self.update_last_nonces(&removed);
// Trigger error if we were removed.
// Trigger error if the transaction we are importing was removed.
try!(check_if_removed(&address, &nonce, removed));
trace!(target: "miner", "status: {:?}", self.status());
@@ -703,9 +713,9 @@ impl TransactionQueue {
}
/// Updates
fn update_last_nonces(&mut self, removed_max_nonces: &Option<HashMap<Address, U256>>) {
if let Some(ref max_nonces) = *removed_max_nonces {
for (sender, nonce) in max_nonces.iter() {
fn update_last_nonces(&mut self, removed_min_nonces: &Option<HashMap<Address, U256>>) {
if let Some(ref min_nonces) = *removed_min_nonces {
for (sender, nonce) in min_nonces.iter() {
if *nonce == U256::zero() {
self.last_nonces.remove(sender);
} else {
@@ -728,7 +738,9 @@ impl TransactionQueue {
let address = tx.sender();
let nonce = tx.nonce();
by_hash.insert(hash, tx);
let old_hash = by_hash.insert(hash, tx);
assert!(old_hash.is_none(), "Each hash has to be inserted exactly once.");
if let Some(old) = set.insert(address, nonce, order.clone()) {
// There was already transaction in queue. Let's check which one should stay
@@ -738,11 +750,11 @@ impl TransactionQueue {
// Put back old transaction since it has greater priority (higher gas_price)
set.insert(address, nonce, old);
// and remove new one
by_hash.remove(&hash);
by_hash.remove(&hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
false
} else {
// Make sure we remove old transaction entirely
by_hash.remove(&old.hash);
by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
true
}
} else {
@@ -762,7 +774,7 @@ fn check_too_cheap(is_in: bool) -> Result<(), TransactionError> {
fn check_if_removed(sender: &Address, nonce: &U256, dropped: Option<HashMap<Address, U256>>) -> Result<(), TransactionError> {
match dropped {
Some(ref dropped) => match dropped.get(sender) {
Some(max) if nonce <= max => {
Some(min) if nonce >= min => {
Err(TransactionError::LimitReached)
},
_ => Ok(()),
@@ -939,7 +951,7 @@ mod test {
let res = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External);
// and then there should be only one transaction in current (the one with higher gas_price)
assert_eq!(unwrap_tx_err(res), TransactionError::TooCheapToReplace);
assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(txq.status().pending, 1);
assert_eq!(txq.status().future, 0);
assert_eq!(txq.current.by_priority.len(), 1);
@@ -1087,7 +1099,28 @@ mod test {
}
#[test]
fn should_prioritize_local_transactions() {
fn should_prioritize_local_transactions_within_same_nonce_height() {
// given
let mut txq = TransactionQueue::new();
let tx = new_tx();
// the second one has same nonce but higher `gas_price`
let (_, tx2) = new_similar_txs();
// when
// first insert the one with higher gas price
txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap();
// then the one with lower gas price, but local
txq.add(tx.clone(), &default_nonce, TransactionOrigin::Local).unwrap();
// then
let top = txq.top_transactions();
assert_eq!(top[0], tx); // local should be first
assert_eq!(top[1], tx2);
assert_eq!(top.len(), 2);
}
#[test]
fn should_not_prioritize_local_transactions_with_different_nonce_height() {
// given
let mut txq = TransactionQueue::new();
let (tx, tx2) = new_txs(U256::from(1));
@@ -1098,8 +1131,8 @@ mod test {
// then
let top = txq.top_transactions();
assert_eq!(top[0], tx2);
assert_eq!(top[1], tx);
assert_eq!(top[0], tx);
assert_eq!(top[1], tx2);
assert_eq!(top.len(), 2);
}
@@ -1555,4 +1588,54 @@ mod test {
assert_eq!(txq.has_local_pending_transactions(), true);
}
#[test]
fn should_keep_right_order_in_future() {
// given
let mut txq = TransactionQueue::with_limit(1);
let (tx1, tx2) = new_txs(U256::from(1));
let prev_nonce = |a: &Address| AccountDetails { nonce: default_nonce(a).nonce - U256::one(), balance:
default_nonce(a).balance };
// when
assert_eq!(txq.add(tx2, &prev_nonce, TransactionOrigin::External).unwrap(), TransactionImportResult::Future);
assert_eq!(txq.add(tx1.clone(), &prev_nonce, TransactionOrigin::External).unwrap(), TransactionImportResult::Future);
// then
assert_eq!(txq.future.by_priority.len(), 1);
assert_eq!(txq.future.by_priority.iter().next().unwrap().hash, tx1.hash());
}
#[test]
fn should_return_correct_last_nonce() {
// given
let mut txq = TransactionQueue::new();
let (tx1, tx2, tx2_2, tx3) = {
let keypair = KeyPair::create().unwrap();
let secret = &keypair.secret();
let nonce = U256::from(123);
let tx = new_unsigned_tx(nonce);
let tx2 = new_unsigned_tx(nonce + 1.into());
let mut tx2_2 = new_unsigned_tx(nonce + 1.into());
tx2_2.gas_price = U256::from(5);
let tx3 = new_unsigned_tx(nonce + 2.into());
(tx.sign(secret), tx2.sign(secret), tx2_2.sign(secret), tx3.sign(secret))
};
let sender = tx1.sender().unwrap();
txq.add(tx1, &default_nonce, TransactionOrigin::Local).unwrap();
txq.add(tx2, &default_nonce, TransactionOrigin::Local).unwrap();
txq.add(tx3, &default_nonce, TransactionOrigin::Local).unwrap();
assert_eq!(txq.future.by_priority.len(), 0);
assert_eq!(txq.current.by_priority.len(), 3);
// when
let res = txq.add(tx2_2, &default_nonce, TransactionOrigin::Local);
// then
assert_eq!(txq.last_nonce(&sender).unwrap(), 125.into());
assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(txq.current.by_priority.len(), 3);
}
}

View File

@@ -41,6 +41,8 @@ pub enum SyncMessage {
NewChainHead,
/// A block is ready
BlockVerified,
/// New transaction RLPs are ready to be imported
NewTransactions(Vec<Bytes>),
/// Start network command.
StartNetwork,
/// Stop network command.
@@ -136,6 +138,9 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
SyncMessage::BlockVerified => {
self.client.import_verified_blocks(&io.channel());
},
SyncMessage::NewTransactions(ref transactions) => {
self.client.import_queued_transactions(&transactions);
},
_ => {}, // ignore other messages
}
}
@@ -154,9 +159,15 @@ mod tests {
#[test]
fn it_can_be_started() {
let spec = get_test_spec();
let temp_path = RandomTempPath::new();
let service = ClientService::start(ClientConfig::default(), spec, NetworkConfiguration::new_local(), &temp_path.as_path(), Arc::new(Miner::with_spec(spec)), false);
let service = ClientService::start(
ClientConfig::default(),
get_test_spec(),
NetworkConfiguration::new_local(),
&temp_path.as_path(),
Arc::new(Miner::with_spec(get_test_spec())),
false
);
assert!(service.is_ok());
}
}

View File

@@ -222,19 +222,25 @@ impl State {
let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true };
let e = try!(Executive::new(self, env_info, engine, vm_factory).transact(t, options));
let broken_dao = H256::from("7278d050619a624f84f51987149ddb439cdaadfba5966f7cfaea7ad44340a4ba");
let broken_dao = H256::from("6a5d24750f78441e56fec050dc52fe8e911976485b7472faac7464a176a67caa");
// dao attack soft fork
if engine.schedule(&env_info).reject_dao_transactions {
// collect all the addresses which have changed.
let addresses = self.cache.borrow().iter().map(|(addr, _)| addr.clone()).collect::<Vec<_>>();
let whitelisted = if let Action::Call(to) = t.action {
to == Address::from("Da4a4626d3E16e094De3225A751aAb7128e96526") ||
to == Address::from("2ba9D006C1D72E67A70b5526Fc6b4b0C0fd6D334")
} else { false };
if !whitelisted {
// collect all the addresses which have changed.
let addresses = self.cache.borrow().iter().map(|(addr, _)| addr.clone()).collect::<Vec<_>>();
for a in &addresses {
if self.code(a).map_or(false, |c| c.sha3() == broken_dao) {
// Figure out if the balance has been reduced.
let maybe_original = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR).get(&a).map(Account::from_rlp);
if maybe_original.map_or(false, |original| *original.balance() > self.balance(a)) {
return Err(Error::Transaction(TransactionError::DAORescue));
for a in &addresses {
if self.code(a).map_or(false, |c| c.sha3() == broken_dao) {
// Figure out if the balance has been reduced.
let maybe_original = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR).get(&a).map(Account::from_rlp);
if maybe_original.map_or(false, |original| *original.balance() > self.balance(a)) {
return Err(Error::Transaction(TransactionError::DAORescue));
}
}
}
}

View File

@@ -179,6 +179,7 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
db,
&last_header,
last_hashes.clone(),
None,
author.clone(),
3141562.into(),
vec![]
@@ -302,7 +303,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
let temp = RandomTempPath::new();
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge);
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, None);
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
@@ -319,7 +320,7 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
}
pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge)
journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, None)
}
pub fn get_temp_state_in(path: &Path) -> State {

View File

@@ -48,6 +48,8 @@ pub struct Config {
pub enabled: Switch,
/// Traces blooms configuration.
pub blooms: BloomConfig,
/// Database cache-size if not default
pub db_cache_size: Option<usize>,
}
impl Default for Config {
@@ -57,7 +59,8 @@ impl Default for Config {
blooms: BloomConfig {
levels: 3,
elements_per_index: 16,
}
},
db_cache_size: None,
}
}
}

View File

@@ -22,7 +22,7 @@ use std::sync::{RwLock, Arc};
use std::path::Path;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DBTransaction};
use util::{H256, H264, Database, DatabaseConfig, DBTransaction};
use header::BlockNumber;
use trace::{BlockTraces, LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest,
DatabaseExtras, Error};
@@ -118,7 +118,12 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
pub fn new(config: Config, path: &Path, extras: Arc<T>) -> Result<Self, Error> {
let mut tracedb_path = path.to_path_buf();
tracedb_path.push("tracedb");
let tracesdb = Database::open_default(tracedb_path.to_str().unwrap()).unwrap();
let tracesdb = match config.db_cache_size {
None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(),
Some(db_cache) => Database::open(
&DatabaseConfig::with_cache(db_cache),
tracedb_path.to_str().unwrap()).unwrap(),
};
// check if in previously tracing was enabled
let old_tracing = match tracesdb.get(b"enabled").unwrap() {