Merge branch 'master' into presale_wallet

debris 2016-06-22 16:14:02 +02:00
commit 927ffa7e9c
51 changed files with 27413 additions and 336 deletions

Cargo.lock (generated)
View File

@ -630,7 +630,7 @@ dependencies = [
[[package]]
name = "jsonrpc-http-server"
version = "5.1.0"
source = "git+https://github.com/ethcore/jsonrpc-http-server.git#6117b1d77b5a60d6fa2dc884f12aa7f5fd4585ca"
source = "git+https://github.com/ethcore/jsonrpc-http-server.git#0c99d308bc15e8fae50642eff77a3e1fd7610652"
dependencies = [
"hyper 0.9.3 (git+https://github.com/ethcore/hyper)",
"jsonrpc-core 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -936,7 +936,7 @@ dependencies = [
[[package]]
name = "parity-dapps-builtins"
version = "0.5.1"
source = "git+https://github.com/ethcore/parity-dapps-builtins-rs.git#413ef9a6f9c46d16d578a48e39adb44d5650d7d7"
source = "git+https://github.com/ethcore/parity-dapps-builtins-rs.git#650b0d94d076635904b86c1fd45c5f4a2061463f"
dependencies = [
"parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
]
@ -960,7 +960,7 @@ dependencies = [
[[package]]
name = "parity-minimal-sysui"
version = "0.1.0"
source = "git+https://github.com/ethcore/parity-dapps-minimal-sysui-rs.git#4c704913f671060bb0e43b5ce4a68d02281115d5"
source = "git+https://github.com/ethcore/parity-dapps-minimal-sysui-rs.git#3c6ad40680126a760eb867b07b506ea996819ce3"
[[package]]
name = "phf"
@ -1103,7 +1103,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rocksdb"
version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#e0e6c099d8cd156fe446009fce241d57b00cd8f4"
source = "git+https://github.com/ethcore/rust-rocksdb#6f3c68f5f075433d206be4af6a620651cd9f8541"
dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
@ -1112,7 +1112,7 @@ dependencies = [
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#e0e6c099d8cd156fe446009fce241d57b00cd8f4"
source = "git+https://github.com/ethcore/rust-rocksdb#6f3c68f5f075433d206be4af6a620651cd9f8541"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -36,7 +36,7 @@ below to build from source.
----
## Building from source
## Build dependencies
Parity is fully compatible with Stable Rust.
@ -55,7 +55,19 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do
$ rustup default stable-x86_64-pc-windows-msvc
```
Once you have rustup, download and build parity:
Once you have rustup, install Parity or download and build it from source:
----
## Quick install
```bash
cargo install --git https://github.com/ethcore/parity.git parity
```
----
## Build from source
```bash
# download Parity code
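# NOTE: the rest of this snippet is truncated in the diff view; the commands
# below are only a sketch of the usual flow, mirroring the CentOS Dockerfile
# added later in this same commit.
git clone https://github.com/ethcore/parity
cd parity
cargo build --release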

docker/centos/Dockerfile (new file)
View File

@ -0,0 +1,25 @@
FROM centos:latest
WORKDIR /build
# install tools and dependencies
RUN yum -y update && \
yum install -y git make gcc-c++ gcc file
# install rustup
RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh && \
ls && \
sh rustup.sh -s -- --disable-sudo
# show backtraces
ENV RUST_BACKTRACE 1
ENV CXX g++
ENV CC gcc
RUN rustc -vV && \
cargo -V && \
gcc -v && \
g++ -v
# git clone parity
RUN git clone https://github.com/ethcore/parity && \
cd parity && \
ls -a && \
cargo build --release --verbose && \
ls /build/parity/target/release/parity && \
file /build/parity/target/release/parity
RUN file /build/parity/target/release/parity

View File

@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30"
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": false
}
}
},

File diff suppressed because it is too large

View File

@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30"
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": false
}
}
},

View File

@ -0,0 +1,43 @@
{
"name": "Frontier (Test)",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x118c30",
"daoRescueSoftFork": true
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x1"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x1388"
},
"accounts": {
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
}
}

View File

@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
"frontierCompatibilityModeLimit": "0xffffffffffffffff",
"daoRescueSoftFork": false
}
}
},

View File

@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": 0
"frontierCompatibilityModeLimit": 0,
"daoRescueSoftFork": false
}
}
},

View File

@ -0,0 +1,43 @@
{
"name": "Homestead (Test)",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": 0,
"daoRescueSoftFork": true
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x1"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x1388"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
}
}

View File

@ -9,7 +9,8 @@
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar": "",
"frontierCompatibilityModeLimit": "0x789b0"
"frontierCompatibilityModeLimit": "0x789b0",
"daoRescueSoftFork": false
}
}
},

View File

@ -9,7 +9,8 @@
"durationLimit": "0x08",
"blockReward": "0x14D1120D7B160000",
"registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
"frontierCompatibilityModeLimit": "0xffffffffffffffff",
"daoRescueSoftFork": false
}
}
},

View File

@ -203,8 +203,9 @@ mod tests {
timestamp: 0,
difficulty: 0.into(),
last_hashes: vec![],
dao_rescue_block_gas_limit: None,
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0);
@ -253,7 +254,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, addr, 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, addr, 3141562.into(), vec![]).unwrap();
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
assert!(b.try_seal(engine.deref(), seal).is_ok());

View File

@ -177,6 +177,7 @@ pub struct OpenBlock<'x> {
engine: &'x Engine,
vm_factory: &'x EvmFactory,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
}
/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
@ -188,6 +189,7 @@ pub struct ClosedBlock {
block: ExecutedBlock,
uncle_bytes: Bytes,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
unclosed_state: State,
}
@ -198,7 +200,6 @@ pub struct ClosedBlock {
pub struct LockedBlock {
block: ExecutedBlock,
uncle_bytes: Bytes,
last_hashes: LastHashes,
}
/// A block that has a valid seal.
@ -219,9 +220,10 @@ impl<'x> OpenBlock<'x> {
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
author: Address,
gas_floor_target: U256,
extra_data: Bytes
extra_data: Bytes,
) -> Result<Self, Error> {
let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()));
let mut r = OpenBlock {
@ -229,6 +231,7 @@ impl<'x> OpenBlock<'x> {
engine: engine,
vm_factory: vm_factory,
last_hashes: last_hashes,
dao_rescue_block_gas_limit: dao_rescue_block_gas_limit,
};
r.block.base.header.parent_hash = parent.hash();
@ -293,6 +296,7 @@ impl<'x> OpenBlock<'x> {
last_hashes: self.last_hashes.clone(), // TODO: should be a reference.
gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
gas_limit: self.block.base.header.gas_limit.clone(),
dao_rescue_block_gas_limit: if self.block.base.header.number == 1760000 { Some(self.block.base.header.gas_limit) } else { self.dao_rescue_block_gas_limit },
}
}
@ -339,6 +343,7 @@ impl<'x> OpenBlock<'x> {
block: s.block,
uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes,
dao_rescue_block_gas_limit: s.dao_rescue_block_gas_limit,
unclosed_state: unclosed_state,
}
}
@ -360,7 +365,6 @@ impl<'x> OpenBlock<'x> {
LockedBlock {
block: s.block,
uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes,
}
}
}
@ -386,7 +390,6 @@ impl ClosedBlock {
LockedBlock {
block: self.block,
uncle_bytes: self.uncle_bytes,
last_hashes: self.last_hashes,
}
}
@ -400,6 +403,7 @@ impl ClosedBlock {
engine: engine,
vm_factory: vm_factory,
last_hashes: self.last_hashes,
dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit,
}
}
}
@ -456,7 +460,18 @@ impl IsBlock for SealedBlock {
/// Enact the block given by block header, transactions and uncles
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
pub fn enact(
header: &Header,
transactions: &[SignedTransaction],
uncles: &[Header],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
{
if ::log::max_log_level() >= ::log::LogLevel::Trace {
let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce()));
@ -464,7 +479,7 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
}
let mut b = try!(OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, header.author().clone(), 3141562.into(), header.extra_data().clone()));
let mut b = try!(OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), 3141562.into(), header.extra_data().clone()));
b.set_difficulty(*header.difficulty());
b.set_gas_limit(*header.gas_limit());
b.set_timestamp(header.timestamp());
@ -474,22 +489,49 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
pub fn enact_bytes(
block_bytes: &[u8],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
let block = BlockView::new(block_bytes);
let header = block.header();
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<LockedBlock, Error> {
pub fn enact_verified(
block: &PreverifiedBlock,
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<LockedBlock, Error> {
let view = BlockView::new(&block.bytes);
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, vm_factory)
enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
}
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, vm_factory: &EvmFactory) -> Result<SealedBlock, Error> {
pub fn enact_and_seal(
block_bytes: &[u8],
engine: &Engine,
tracing: bool,
db: Box<JournalDB>,
parent: &Header,
last_hashes: LastHashes,
dao_rescue_block_gas_limit: Option<U256>,
vm_factory: &EvmFactory
) -> Result<SealedBlock, Error> {
let header = BlockView::new(block_bytes).header_view();
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory)).seal(engine, header.seal())))
Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)).seal(engine, header.seal())))
}
#[cfg(test)]
@ -509,7 +551,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = b.close_and_lock();
let _ = b.seal(engine.deref(), vec![]);
}
@ -525,7 +567,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]).unwrap()
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), 3141562.into(), vec![]).unwrap()
.close_and_lock().seal(engine.deref(), vec![]).unwrap();
let orig_bytes = b.rlp_bytes();
let orig_db = b.drain();
@ -533,7 +575,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
assert_eq!(e.rlp_bytes(), orig_bytes);
@ -553,7 +595,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]).unwrap();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut uncle1_header = Header::new();
uncle1_header.extra_data = b"uncle1".to_vec();
let mut uncle2_header = Header::new();
@ -568,7 +610,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
let bytes = e.rlp_bytes();
assert_eq!(bytes, orig_bytes);
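One detail worth calling out in the `env_info` change above: the rescue gas limit is captured exactly once, at block 1,760,000, and otherwise just threaded through from the caller. A minimal sketch of that rule, with plain `u64` standing in for `U256` and names simplified:

```rust
// Sketch of the propagation rule in OpenBlock::env_info above: block #1760000
// contributes its own gas limit, every other block forwards whatever the
// caller (ultimately the client) supplied.
fn rescue_gas_limit(block_number: u64, block_gas_limit: u64, inherited: Option<u64>) -> Option<u64> {
    if block_number == 1_760_000 {
        Some(block_gas_limit)
    } else {
        inherited
    }
}

fn main() {
    // The fork block itself records its gas limit...
    assert_eq!(rescue_gas_limit(1_760_000, 3_500_000, None), Some(3_500_000));
    // ...and later blocks keep passing that recorded value along.
    assert_eq!(rescue_gas_limit(1_760_001, 4_700_000, Some(3_500_000)), Some(3_500_000));
}
```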

View File

@ -130,7 +130,9 @@ impl QueueSignal {
}
if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
if let Err(e) = self.message_channel.send(UserMessage(SyncMessage::BlockVerified)) {
debug!("Error sending BlockVerified message: {:?}", e);
}
}
}

View File

@ -230,7 +230,7 @@ impl<V> Client<V> where V: Verifier {
let last_hashes = self.build_last_hashes(header.parent_hash.clone());
let db = self.state_db.lock().unwrap().boxed_clone();
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory);
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(), &self.vm_factory);
if let Err(e) = enact_result {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
@ -381,7 +381,7 @@ impl<V> Client<V> where V: Verifier {
balance: self.latest_balance(a),
};
let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
let results = self.miner.import_transactions(tx, fetch_account);
let results = self.miner.import_transactions(self, tx, fetch_account);
results.len()
}
@ -486,6 +486,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(),
};
// that's just a copy of the state.
let mut state = self.state();
@ -771,7 +772,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
nonce: self.latest_nonce(a),
balance: self.latest_balance(a),
};
self.miner.import_transactions(transactions, fetch_account)
self.miner.import_transactions(self, transactions, fetch_account)
}
fn queue_transactions(&self, transactions: Vec<Bytes>) {
@ -807,6 +808,7 @@ impl<V> MiningBlockChainClient for Client<V> where V: Verifier {
self.state_db.lock().unwrap().boxed_clone(),
&self.chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"),
self.build_last_hashes(h.clone()),
self.dao_rescue_block_gas_limit(),
author,
gas_floor_target,
extra_data,

View File

@ -42,7 +42,7 @@ use header::{BlockNumber, Header};
use transaction::{LocalizedTransaction, SignedTransaction};
use log_entry::LocalizedLogEntry;
use filter::Filter;
use views::BlockView;
use views::{HeaderView, BlockView};
use error::{ImportResult, ExecutionError};
use receipt::LocalizedReceipt;
use trace::LocalizedTrace;
@ -224,6 +224,13 @@ pub trait BlockChainClient : Sync + Send {
Err(())
}
}
/// Get `Some` gas limit of block 1_760_000, or `None` if chain is not yet that long.
fn dao_rescue_block_gas_limit(&self) -> Option<U256> {
self.block_header(BlockID::Number(1_760_000))
.map(|header| HeaderView::new(&header).gas_limit())
}
}
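The new default method on `BlockChainClient` reduces to a single header lookup: report the gas limit of block 1,760,000 if the chain has reached it, `None` otherwise. A self-contained sketch of that behaviour, with the header store reduced to a map (simplified types, not the real trait):

```rust
use std::collections::HashMap;

// Simplified stand-in for the chain's header store: block number -> gas limit.
struct Chain {
    gas_limits: HashMap<u64, u64>,
}

impl Chain {
    // Mirrors the default trait method above: Some(gas limit of block
    // 1_760_000) once that block exists, None while the chain is shorter.
    fn dao_rescue_block_gas_limit(&self) -> Option<u64> {
        self.gas_limits.get(&1_760_000).cloned()
    }
}

fn main() {
    let mut chain = Chain { gas_limits: HashMap::new() };
    assert_eq!(chain.dao_rescue_block_gas_limit(), None);
    chain.gas_limits.insert(1_760_000, 3_500_000);
    assert_eq!(chain.dao_rescue_block_gas_limit(), Some(3_500_000));
}
```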
/// Extended client interface used for mining

View File

@ -490,7 +490,7 @@ impl BlockChainClient for TestBlockChainClient {
balance: balances[a],
};
self.miner.import_transactions(transactions, &fetch_account)
self.miner.import_transactions(self, transactions, &fetch_account)
}
fn queue_transactions(&self, transactions: Vec<Bytes>) {

View File

@ -39,6 +39,9 @@ pub struct EnvInfo {
pub last_hashes: LastHashes,
/// The gas used.
pub gas_used: U256,
/// Block gas limit at DAO rescue block #1760000 or None if not yet there.
pub dao_rescue_block_gas_limit: Option<U256>,
}
impl Default for EnvInfo {
@ -51,6 +54,7 @@ impl Default for EnvInfo {
gas_limit: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
dao_rescue_block_gas_limit: None,
}
}
}
@ -66,6 +70,7 @@ impl From<ethjson::vm::Env> for EnvInfo {
timestamp: e.timestamp.into(),
last_hashes: (1..cmp::min(number + 1, 257)).map(|i| format!("{}", number - i).as_bytes().sha3()).collect(),
gas_used: U256::zero(),
dao_rescue_block_gas_limit: None,
}
}
}

View File

@ -41,6 +41,8 @@ pub struct EthashParams {
pub registrar: Address,
/// Homestead transition block number.
pub frontier_compatibility_mode_limit: u64,
/// Enable the soft-fork logic.
pub dao_rescue_soft_fork: bool,
}
impl From<ethjson::spec::EthashParams> for EthashParams {
@ -53,6 +55,7 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
block_reward: p.block_reward.into(),
registrar: p.registrar.into(),
frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.into(),
dao_rescue_soft_fork: p.dao_rescue_soft_fork.into(),
}
}
}
@ -102,8 +105,9 @@ impl Engine for Ethash {
Schedule::new_frontier()
} else {
let mut s = Schedule::new_homestead();
// TODO: make dependent on gaslimit > 4000000 of block 1760000.
s.reject_dao_transactions = env_info.number >= 1760000;
if self.ethash_params.dao_rescue_soft_fork {
s.reject_dao_transactions = env_info.dao_rescue_block_gas_limit.map(|x| x <= 4_000_000.into()).unwrap_or(false);
}
s
}
}
@ -319,7 +323,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let b = b.close();
assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
}
@ -334,7 +338,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), 3141562.into(), vec![]).unwrap();
let mut uncle = Header::new();
let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
uncle.author = uncle_author.clone();
@ -362,7 +366,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
});
assert!(schedule.stack_limit > 0);
@ -374,7 +379,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
});
assert!(!schedule.have_delegate_call);
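The `schedule` change above replaces the old hard-coded block-number rule with a gas-limit vote: with `daoRescueSoftFork` enabled in the spec, DAO transactions are rejected only if the gas limit recorded at block 1,760,000 is at or below 4,000,000. A compact sketch of that decision with a few sample inputs (plain `u64` instead of `U256`):

```rust
// Decision rule from Ethash::schedule above, reduced to a free function.
fn reject_dao_transactions(soft_fork_enabled: bool, rescue_gas_limit: Option<u64>) -> bool {
    soft_fork_enabled && rescue_gas_limit.map(|limit| limit <= 4_000_000).unwrap_or(false)
}

fn main() {
    // Soft fork disabled in the chain spec: never reject.
    assert!(!reject_dao_transactions(false, Some(3_000_000)));
    // Chain not yet at block 1_760_000: nothing has been voted on yet.
    assert!(!reject_dao_transactions(true, None));
    // Gas limit voted down to 3M at the fork block: rejections are active.
    assert!(reject_dao_transactions(true, Some(3_000_000)));
    // Gas limit kept above 4M: miners voted the soft fork out.
    assert!(!reject_dao_transactions(true, Some(4_700_000)));
}
```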

View File

@ -33,7 +33,12 @@ use super::spec::*;
pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) }
/// Create a new Frontier mainnet chain spec.
pub fn new_frontier() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier.json")) }
pub fn new_frontier(dao_rescue: bool) -> Spec {
Spec::load(match dao_rescue {
true => include_bytes!("../../res/ethereum/frontier_dao_rescue.json"),
false => include_bytes!("../../res/ethereum/frontier.json"),
})
}
/// Create a new Frontier chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { Spec::load(include_bytes!("../../res/ethereum/frontier_test.json")) }
@ -84,7 +89,7 @@ mod tests {
#[test]
fn frontier() {
let frontier = new_frontier();
let frontier = new_frontier(true);
assert_eq!(frontier.state_root(), H256::from_str("d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544").unwrap());
let genesis = frontier.genesis_block();
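`new_frontier` now selects between two bundled chain specs, and the CLI (see `configuration.rs` later in this commit) calls it as `new_frontier(!flag_dogmatic)`. A small sketch of that selection, using the resource paths from the `include_bytes!` calls above:

```rust
// Which bundled spec new_frontier(dao_rescue) loads.
fn frontier_spec_path(dao_rescue: bool) -> &'static str {
    if dao_rescue {
        "res/ethereum/frontier_dao_rescue.json" // daoRescueSoftFork: true
    } else {
        "res/ethereum/frontier.json" // daoRescueSoftFork: false
    }
}

fn main() {
    let dogmatic = false; // --dogmatic not passed
    assert_eq!(frontier_spec_path(!dogmatic), "res/ethereum/frontier_dao_rescue.json");
    assert_eq!(frontier_spec_path(false), "res/ethereum/frontier.json");
}
```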

View File

@ -318,7 +318,8 @@ mod tests {
difficulty: 0.into(),
last_hashes: vec![],
gas_used: 0.into(),
gas_limit: 0.into()
gas_limit: 0.into(),
dao_rescue_block_gas_limit: None,
}
}

View File

@ -106,34 +106,37 @@ impl Miner {
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn prepare_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "prepare_sealing: entering");
let transactions = self.transaction_queue.lock().unwrap().top_transactions();
let mut sealing_work = self.sealing_work.lock().unwrap();
let best_hash = chain.best_block_header().sha3();
let (transactions, mut open_block) = {
let transactions = {self.transaction_queue.lock().unwrap().top_transactions()};
let mut sealing_work = self.sealing_work.lock().unwrap();
let best_hash = chain.best_block_header().sha3();
/*
// check to see if last ClosedBlock in would_seals is actually same parent block.
// if so
// duplicate, re-open and push any new transactions.
// if at least one was pushed successfully, close and enqueue new ClosedBlock;
// otherwise, leave everything alone.
// otherwise, author a fresh block.
// check to see if last ClosedBlock in would_seals is actually same parent block.
// if so
// duplicate, re-open and push any new transactions.
// if at least one was pushed successfully, close and enqueue new ClosedBlock;
// otherwise, leave everything alone.
// otherwise, author a fresh block.
*/
let mut open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block
let e = self.engine();
old_block.reopen(e, chain.vm_factory())
}
None => {
// block not found - create it.
trace!(target: "miner", "No existing work - making new block");
chain.prepare_open_block(
self.author(),
self.gas_floor_target(),
self.extra_data()
)
}
let open_block = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
// add transactions to old_block
let e = self.engine();
old_block.reopen(e, chain.vm_factory())
}
None => {
// block not found - create it.
trace!(target: "miner", "No existing work - making new block");
chain.prepare_open_block(
self.author(),
self.gas_floor_target(),
self.extra_data()
)
}
};
(transactions, open_block)
};
let mut invalid_transactions = HashSet::new();
@ -163,14 +166,16 @@ impl Miner {
let block = open_block.close();
let mut queue = self.transaction_queue.lock().unwrap();
let fetch_account = |a: &Address| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
};
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
{
let mut queue = self.transaction_queue.lock().unwrap();
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
}
}
if !block.transactions().is_empty() {
@ -196,6 +201,8 @@ impl Miner {
trace!(target: "miner", "prepare_sealing: unable to generate seal internally");
}
}
let mut sealing_work = self.sealing_work.lock().unwrap();
if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
sealing_work.push(block);
@ -267,6 +274,7 @@ impl MinerService for Miner {
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
dao_rescue_block_gas_limit: chain.dao_rescue_block_gas_limit(),
};
// that's just a copy of the state.
let mut state = block.state().clone();
@ -376,13 +384,19 @@ impl MinerService for Miner {
*self.gas_floor_target.read().unwrap()
}
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
Vec<Result<TransactionImportResult, Error>>
where T: Fn(&Address) -> AccountDetails {
let mut transaction_queue = self.transaction_queue.lock().unwrap();
transactions.into_iter()
.map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
.collect()
let results: Vec<Result<TransactionImportResult, Error>> = {
let mut transaction_queue = self.transaction_queue.lock().unwrap();
transactions.into_iter()
.map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
.collect()
};
if !results.is_empty() {
self.update_sealing(chain);
}
results
}
fn import_own_transaction<T>(
@ -564,7 +578,7 @@ impl MinerService for Miner {
for tx in &txs {
let _sender = tx.sender();
}
let _ = self.import_transactions(txs, |a| AccountDetails {
let _ = self.import_transactions(chain, txs, |a| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
});
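`import_transactions` now takes the chain handle so it can refresh the pending block after importing external transactions. Note the scoping: the results are collected inside an inner block so the `transaction_queue` lock is released before `update_sealing` (which needs to touch miner state again) runs. A reduced sketch of that pattern, with the queue stood in by a `Vec<u64>`:

```rust
use std::sync::Mutex;

struct Miner {
    queue: Mutex<Vec<u64>>, // stand-in for the transaction queue
}

impl Miner {
    fn update_sealing(&self) {
        // In the real code this re-enters miner state (and the queue), so the
        // caller must not still be holding the queue lock.
        let pending = self.queue.lock().unwrap().len();
        println!("refreshing pending block with {} queued transactions", pending);
    }

    // Mirrors the new import_transactions flow: collect results inside an
    // inner scope so the queue lock is dropped before update_sealing runs.
    fn import_transactions(&self, txs: Vec<u64>) -> usize {
        let imported = {
            let mut queue = self.queue.lock().unwrap();
            queue.extend(txs.iter().copied());
            txs.len()
        }; // queue lock released here
        if imported > 0 {
            self.update_sealing();
        }
        imported
    }
}

fn main() {
    let miner = Miner { queue: Mutex::new(Vec::new()) };
    assert_eq!(miner.import_transactions(vec![1, 2, 3]), 3);
}
```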

View File

@ -94,7 +94,7 @@ pub trait MinerService : Send + Sync {
fn set_transactions_limit(&self, limit: usize);
/// Imports transactions to transaction queue.
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
Vec<Result<TransactionImportResult, Error>>
where T: Fn(&Address) -> AccountDetails, Self: Sized;

View File

@ -657,8 +657,14 @@ impl TransactionQueue {
.cloned()
.map_or(state_nonce, |n| n + U256::one());
// Check height
if nonce > next_nonce {
// The transaction might be old, let's check that.
// This has to be the first test, otherwise calculating
// nonce height would result in overflow.
if nonce < state_nonce {
// Dropping transaction
trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
return Err(TransactionError::Old);
} else if nonce > next_nonce {
// We have a gap - put to future.
// Update nonces of transactions in future (remove old transactions)
self.update_future(&address, state_nonce);
@ -667,12 +673,7 @@ impl TransactionQueue {
// Return an error if this transaction is not imported because of limit.
try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
return Ok(TransactionImportResult::Future);
} else if nonce < state_nonce {
// Droping transaction
trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
return Err(TransactionError::Old);
}
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
// Keep track of highest nonce stored in current
let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n));
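The reordering above is about unsigned arithmetic: the queue computes a nonce "height" (distance above the account's state nonce), so a stale nonce has to be rejected before any such subtraction happens. A simplified model of the three-way classification with `u64` nonces (the real queue works on `U256` and also enforces queue limits):

```rust
#[derive(Debug, PartialEq)]
enum Verdict {
    Old,         // nonce already used on-chain: drop the transaction
    Future(u64), // there is a gap: park it in the future queue at this height
    Current,     // next expected nonce: goes straight into the current queue
}

fn classify(nonce: u64, state_nonce: u64, next_nonce: u64) -> Verdict {
    // The "old" test must come first: nonce - state_nonce below would
    // underflow for a stale nonce.
    if nonce < state_nonce {
        Verdict::Old
    } else if nonce > next_nonce {
        Verdict::Future(nonce - state_nonce)
    } else {
        Verdict::Current
    }
}

fn main() {
    // Account state nonce 5, nothing queued, so the next expected nonce is 5.
    assert_eq!(classify(3, 5, 5), Verdict::Old);
    assert_eq!(classify(9, 5, 5), Verdict::Future(4));
    assert_eq!(classify(5, 5, 5), Verdict::Current);
}
```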

View File

@ -222,19 +222,25 @@ impl State {
let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true };
let e = try!(Executive::new(self, env_info, engine, vm_factory).transact(t, options));
let broken_dao = H256::from("7278d050619a624f84f51987149ddb439cdaadfba5966f7cfaea7ad44340a4ba");
let broken_dao = H256::from("6a5d24750f78441e56fec050dc52fe8e911976485b7472faac7464a176a67caa");
// dao attack soft fork
if engine.schedule(&env_info).reject_dao_transactions {
// collect all the addresses which have changed.
let addresses = self.cache.borrow().iter().map(|(addr, _)| addr.clone()).collect::<Vec<_>>();
let whitelisted = if let Action::Call(to) = t.action {
to == Address::from("Da4a4626d3E16e094De3225A751aAb7128e96526") ||
to == Address::from("2ba9D006C1D72E67A70b5526Fc6b4b0C0fd6D334")
} else { false };
if !whitelisted {
// collect all the addresses which have changed.
let addresses = self.cache.borrow().iter().map(|(addr, _)| addr.clone()).collect::<Vec<_>>();
for a in &addresses {
if self.code(a).map_or(false, |c| c.sha3() == broken_dao) {
// Figure out if the balance has been reduced.
let maybe_original = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR).get(&a).map(Account::from_rlp);
if maybe_original.map_or(false, |original| *original.balance() > self.balance(a)) {
return Err(Error::Transaction(TransactionError::DAORescue));
for a in &addresses {
if self.code(a).map_or(false, |c| c.sha3() == broken_dao) {
// Figure out if the balance has been reduced.
let maybe_original = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR).get(&a).map(Account::from_rlp);
if maybe_original.map_or(false, |original| *original.balance() > self.balance(a)) {
return Err(Error::Transaction(TransactionError::DAORescue));
}
}
}
}
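The soft-fork filter above now has an explicit whitelist: calls to the two listed withdrawal contracts are always allowed, and for everything else the transaction is rejected if any account it touched runs the broken DAO code and ended up with a lower balance. A reduced model of that rule (addresses and code hashes abstracted to booleans, balances to plain integers):

```rust
// One account touched by the transaction, as seen by the filter.
struct Touched {
    code_is_broken_dao: bool, // code hash matches the known broken DAO code hash
    balance_before: u64,
    balance_after: u64,
}

// true => the transaction must be rejected with TransactionError::DAORescue.
fn violates_rescue(call_target_whitelisted: bool, touched: &[Touched]) -> bool {
    if call_target_whitelisted {
        return false; // withdrawal contracts are exempt
    }
    touched
        .iter()
        .any(|a| a.code_is_broken_dao && a.balance_after < a.balance_before)
}

fn main() {
    let drained = Touched { code_is_broken_dao: true, balance_before: 100, balance_after: 10 };
    assert!(violates_rescue(false, &[drained]));
    let drained = Touched { code_is_broken_dao: true, balance_before: 100, balance_after: 10 };
    assert!(!violates_rescue(true, &[drained])); // whitelisted withdrawal path
}
```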

View File

@ -179,6 +179,7 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
db,
&last_header,
last_hashes.clone(),
None,
author.clone(),
3141562.into(),
vec![]

View File

@ -53,7 +53,8 @@ mod tests {
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit" : "0x"
"frontierCompatibilityModeLimit" : "0x",
"daoRescueSoftFork": true
}
}
}"#;

View File

@ -42,6 +42,9 @@ pub struct EthashParams {
/// Homestead transition block number.
#[serde(rename="frontierCompatibilityModeLimit")]
pub frontier_compatibility_mode_limit: Uint,
/// DAO rescue soft-fork?
#[serde(rename="daoRescueSoftFork")]
pub dao_rescue_soft_fork: bool,
}
/// Ethash engine deserialization.
@ -65,8 +68,9 @@ mod tests {
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit" : "0x42"
"registrar": "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0x42",
"daoRescueSoftFork": true
}
}"#;

View File

@ -63,7 +63,8 @@ mod tests {
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit" : "0x"
"frontierCompatibilityModeLimit" : "0x",
"daoRescueSoftFork": false
}
}
},

View File

@ -43,12 +43,20 @@ Protocol Options:
[default: $HOME/.parity/keys].
--identity NAME Specify your node's name.
DAO-Rescue Soft-fork Options:
--help-rescue-dao Does nothing - on by default.
--dont-help-rescue-dao Votes against the DAO-rescue soft-fork, but supports
it if it is triggered anyway.
Equivalent to --gas-floor-target=3141592.
--dogmatic Ignores all DAO-rescue soft-fork behaviour. Even if
it means losing mining rewards.
Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
ACCOUNTS is a comma-delimited list of addresses.
--password FILE Provide a file containing a password for unlocking
an account.
--keys-iterations NUM Specify the number of iterations to use when
--keys-iterations NUM Specify the number of iterations to use when
deriving key from the password (bigger is more
secure) [default: 10240].
--no-import-keys Do not import keys from legacy clients.
@ -230,6 +238,8 @@ pub struct Args {
pub flag_chain: String,
pub flag_db_path: String,
pub flag_identity: String,
pub flag_dont_help_rescue_dao: bool,
pub flag_dogmatic: bool,
pub flag_unlock: Option<String>,
pub flag_password: Vec<String>,
pub flag_cache: Option<usize>,
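The two new flags are opt-outs; with neither of them the client defaults to supporting the rescue. Example invocations, using the flag names exactly as listed in the help text above:

```bash
# default behaviour: support the DAO-rescue soft-fork
parity

# vote against the soft-fork, but still honour it if it activates anyway
parity --dont-help-rescue-dao

# ignore the soft-fork logic entirely, even at the cost of mining rewards
parity --dogmatic
```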

View File

@ -75,12 +75,18 @@ impl Configuration {
}
pub fn gas_floor_target(&self) -> U256 {
let d = &self.args.flag_gas_floor_target;
U256::from_dec_str(d).unwrap_or_else(|_| {
die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
})
if self.args.flag_dont_help_rescue_dao || self.args.flag_dogmatic {
4_700_000.into()
} else {
let d = &self.args.flag_gas_floor_target;
U256::from_dec_str(d).unwrap_or_else(|_| {
die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
})
}
}
pub fn gas_price(&self) -> U256 {
match self.args.flag_gasprice.as_ref() {
Some(d) => {
@ -115,16 +121,20 @@ impl Configuration {
}
pub fn extra_data(&self) -> Bytes {
match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) {
Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
None => version_data(),
Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
if !self.args.flag_dont_help_rescue_dao {
(b"rescuedao"[..]).to_owned()
} else {
match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) {
Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
None => version_data(),
Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
}
}
}
pub fn spec(&self) -> Spec {
match self.chain().as_str() {
"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(!self.args.flag_dogmatic),
"morden" | "testnet" => ethereum::new_morden(),
"olympic" => ethereum::new_olympic(),
f => Spec::load(contents(f).unwrap_or_else(|_| {
@ -155,7 +165,6 @@ impl Configuration {
pub fn init_reserved_nodes(&self) -> Vec<String> {
use std::fs::File;
use std::io::BufRead;
if let Some(ref path) = self.args.flag_reserved_peers {
let mut buffer = String::new();
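Two defaults change here: when either opt-out flag is given, the gas floor target is pinned (to 4,700,000 in the code above, keeping the voted gas limit above the 4,000,000 threshold) instead of honouring `--gas-floor-target`; and unless `--dont-help-rescue-dao` is given, block extra-data becomes the literal bytes `rescuedao`, a visible on-chain vote. A reduced sketch of the extra-data selection:

```rust
// Reduced sketch of Configuration::extra_data above: unless the user opts out,
// mined blocks carry the ASCII bytes "rescuedao" as their extra-data.
fn extra_data(dont_help_rescue_dao: bool, user_extra: Option<&str>, version_data: &[u8]) -> Vec<u8> {
    if !dont_help_rescue_dao {
        b"rescuedao".to_vec()
    } else {
        match user_extra {
            Some(x) if x.len() <= 32 => x.as_bytes().to_vec(),
            Some(x) => panic!("{}: Extra data must be at most 32 characters.", x),
            None => version_data.to_vec(),
        }
    }
}

fn main() {
    assert_eq!(extra_data(false, Some("my-pool"), b"parity/v1.2"), b"rescuedao".to_vec());
    assert_eq!(extra_data(true, Some("my-pool"), b"parity/v1.2"), b"my-pool".to_vec());
    assert_eq!(extra_data(true, None, b"parity/v1.2"), b"parity/v1.2".to_vec());
}
```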

View File

@ -150,7 +150,7 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
server.add_delegate(EthFilterClient::new(&deps.client, &deps.miner).to_delegate());
if deps.signer_port.is_some() {
server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.miner).to_delegate());
server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner).to_delegate());
} else {
server.add_delegate(EthSigningUnsafeClient::new(&deps.client, &deps.secret_store, &deps.miner).to_delegate());
}
@ -162,7 +162,8 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_queue).to_delegate());
},
Api::Ethcore => {
server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, deps.logger.clone(), deps.settings.clone()).to_delegate())
let queue = deps.signer_port.map(|_| deps.signer_queue.clone());
server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, deps.logger.clone(), deps.settings.clone(), queue).to_delegate())
},
Api::EthcoreSet => {
server.add_delegate(EthcoreSetClient::new(&deps.miner, &deps.net_service).to_delegate())

View File

@ -69,6 +69,12 @@ pub trait SigningQueue: Send + Sync {
/// Return copy of all the requests in the queue.
fn requests(&self) -> Vec<TransactionConfirmation>;
/// Returns number of transactions awaiting confirmation.
fn len(&self) -> usize;
/// Returns true if there are no transactions awaiting confirmation.
fn is_empty(&self) -> bool;
}
#[derive(Debug, PartialEq)]
@ -277,6 +283,16 @@ impl SigningQueue for ConfirmationsQueue {
let queue = self.queue.read().unwrap();
queue.values().map(|token| token.request.clone()).collect()
}
fn len(&self) -> usize {
let queue = self.queue.read().unwrap();
queue.len()
}
fn is_empty(&self) -> bool {
let queue = self.queue.read().unwrap();
queue.is_empty()
}
}

View File

@ -37,7 +37,7 @@ use ethcore::filter::Filter as EthcoreFilter;
use self::ethash::SeedHashCompute;
use v1::traits::Eth;
use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, OptionalValue, Index, Filter, Log, Receipt};
use v1::impls::{dispatch_transaction, error_codes};
use v1::impls::{default_gas_price, dispatch_transaction, error_codes};
use serde;
/// Eth rpc implementation.
@ -153,23 +153,14 @@ impl<C, S, M, EM> EthClient<C, S, M, EM> where
}
}
fn default_gas_price(&self) -> Result<U256, Error> {
let miner = take_weak!(self.miner);
Ok(take_weak!(self.client)
.gas_price_statistics(100, 8)
.map(|x| x[4])
.unwrap_or_else(|_| miner.sensible_gas_price())
)
}
fn sign_call(&self, request: CallRequest) -> Result<SignedTransaction, Error> {
let client = take_weak!(self.client);
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
let from = request.from.unwrap_or(Address::zero());
Ok(EthTransaction {
nonce: request.nonce.unwrap_or_else(|| client.latest_nonce(&from)),
action: request.to.map_or(Action::Create, Action::Call),
gas: request.gas.unwrap_or(U256::from(50_000_000)),
gas_price: request.gas_price.unwrap_or_else(|| self.default_gas_price().expect("call only fails if client or miner are unavailable; client and miner are both available to be here; qed")),
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(&*client, &*miner)),
value: request.value.unwrap_or_else(U256::zero),
data: request.data.map_or_else(Vec::new, |d| d.to_vec())
}.fake_sign(from))
@ -296,7 +287,10 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
fn gas_price(&self, params: Params) -> Result<Value, Error> {
match params {
Params::None => to_value(&try!(self.default_gas_price())),
Params::None => {
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
to_value(&default_gas_price(&*client, &*miner))
}
_ => Err(Error::invalid_params())
}
}

View File

@ -25,38 +25,42 @@ use ethcore::account_provider::AccountProvider;
use v1::helpers::{SigningQueue, ConfirmationsQueue};
use v1::traits::EthSigning;
use v1::types::{TransactionRequest, Bytes};
use v1::impls::sign_and_dispatch;
use v1::impls::{default_gas_price, sign_and_dispatch};
fn fill_optional_fields<C, M>(request: &mut TransactionRequest, client: &C, miner: &M)
where C: MiningBlockChainClient, M: MinerService {
if request.gas.is_none() {
request.gas = Some(miner.sensible_gas_limit());
}
if request.gas_price.is_none() {
request.gas_price = Some(default_gas_price(client, miner));
}
if request.data.is_none() {
request.data = Some(Bytes::new(Vec::new()));
}
}
/// Implementation of functions that require signing when no trusted signer is used.
pub struct EthSigningQueueClient<M: MinerService> {
pub struct EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: MinerService {
queue: Weak<ConfirmationsQueue>,
client: Weak<C>,
miner: Weak<M>,
}
impl<M: MinerService> EthSigningQueueClient<M> {
impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: MinerService {
/// Creates a new signing queue client given shared signing queue.
pub fn new(queue: &Arc<ConfirmationsQueue>, miner: &Arc<M>) -> Self {
pub fn new(queue: &Arc<ConfirmationsQueue>, client: &Arc<C>, miner: &Arc<M>) -> Self {
EthSigningQueueClient {
queue: Arc::downgrade(queue),
client: Arc::downgrade(client),
miner: Arc::downgrade(miner),
}
}
fn fill_optional_fields(&self, miner: Arc<M>, mut request: TransactionRequest) -> TransactionRequest {
if let None = request.gas {
request.gas = Some(miner.sensible_gas_limit());
}
if let None = request.gas_price {
request.gas_price = Some(miner.sensible_gas_price());
}
if let None = request.data {
request.data = Some(Bytes::new(Vec::new()));
}
request
}
}
impl<M: MinerService + 'static> EthSigning for EthSigningQueueClient<M> {
impl<C, M> EthSigning for EthSigningQueueClient<C, M>
where C: MiningBlockChainClient + 'static, M: MinerService + 'static
{
fn sign(&self, _params: Params) -> Result<Value, Error> {
warn!("Invoking eth_sign is not yet supported with signer enabled.");
@ -66,10 +70,11 @@ impl<M: MinerService + 'static> EthSigning for EthSigningQueueClient<M> {
fn send_transaction(&self, params: Params) -> Result<Value, Error> {
from_params::<(TransactionRequest, )>(params)
.and_then(|(request, )| {
.and_then(|(mut request, )| {
let queue = take_weak!(self.queue);
let miner = take_weak!(self.miner);
let request = self.fill_optional_fields(miner, request);
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
fill_optional_fields(&mut request, &*client, &*miner);
let id = queue.add_request(request);
let result = id.wait_with_timeout();
result.unwrap_or_else(|| to_value(&H256::new()))

View File

@ -26,6 +26,8 @@ use jsonrpc_core::*;
use ethcore::miner::MinerService;
use v1::traits::Ethcore;
use v1::types::{Bytes};
use v1::helpers::{SigningQueue, ConfirmationsQueue};
use v1::impls::error_codes;
/// Ethcore implementation.
pub struct EthcoreClient<C, M> where
@ -36,16 +38,18 @@ pub struct EthcoreClient<C, M> where
miner: Weak<M>,
logger: Arc<RotatingLogger>,
settings: Arc<NetworkSettings>,
confirmations_queue: Option<Arc<ConfirmationsQueue>>,
}
impl<C, M> EthcoreClient<C, M> where C: MiningBlockChainClient, M: MinerService {
/// Creates new `EthcoreClient`.
pub fn new(client: &Arc<C>, miner: &Arc<M>, logger: Arc<RotatingLogger>, settings: Arc<NetworkSettings>) -> Self {
pub fn new(client: &Arc<C>, miner: &Arc<M>, logger: Arc<RotatingLogger>, settings: Arc<NetworkSettings>, queue: Option<Arc<ConfirmationsQueue>>) -> Self {
EthcoreClient {
client: Arc::downgrade(client),
miner: Arc::downgrade(miner),
logger: logger,
settings: settings,
confirmations_queue: queue,
}
}
}
@ -120,4 +124,15 @@ impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: M
_ => Err(Error::invalid_params()),
}
}
fn unsigned_transactions_count(&self, _params: Params) -> Result<Value, Error> {
match self.confirmations_queue {
None => Err(Error {
code: ErrorCode::ServerError(error_codes::SIGNER_DISABLED),
message: "Trusted Signer is disabled. This API is not available.".into(),
data: None
}),
Some(ref queue) => to_value(&queue.len()),
}
}
}

View File

@ -71,6 +71,7 @@ mod error_codes {
pub const UNKNOWN_ERROR: i64 = -32002;
pub const TRANSACTION_ERROR: i64 = -32010;
pub const ACCOUNT_LOCKED: i64 = -32020;
pub const SIGNER_DISABLED: i64 = -32030;
}
fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<Value, Error>
@ -99,7 +100,7 @@ fn prepare_transaction<C, M>(client: &C, miner: &M, request: TransactionRequest)
action: request.to.map_or(Action::Create, Action::Call),
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(client, miner)),
value: request.value.unwrap_or_else(U256::zero),
data: request.data.map_or_else(Vec::new, |b| b.to_vec()),
}
@ -133,6 +134,14 @@ fn sign_and_dispatch<C, M>(client: &C, miner: &M, request: TransactionRequest, a
dispatch_transaction(&*client, &*miner, signed_transaction)
}
fn default_gas_price<C, M>(client: &C, miner: &M) -> U256 where C: MiningBlockChainClient, M: MinerService {
client
.gas_price_statistics(100, 8)
.map(|x| x[4])
.unwrap_or_else(|_| miner.sensible_gas_price())
}
fn signing_error(error: AccountError) -> Error {
Error {
code: ErrorCode::ServerError(error_codes::ACCOUNT_LOCKED),
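The shared `default_gas_price` helper prefers a value derived from recently observed gas prices (`gas_price_statistics(100, 8)`, taking the middle entry of the returned distribution) and falls back to the miner's configured `sensible_gas_price()` when no statistics are available. A reduced sketch of that fallback, assuming the statistics call yields a nine-point distribution so that index 4 is the middle sample:

```rust
// Prefer the middle of the recent gas-price distribution, otherwise fall back
// to the miner's configured default.
fn default_gas_price(recent_distribution: Result<Vec<u64>, ()>, miner_default: u64) -> u64 {
    recent_distribution.map(|dist| dist[4]).unwrap_or(miner_default)
}

fn main() {
    // Nine sampled values from recent blocks: index 4 is the middle one.
    let dist = vec![1, 2, 5, 10, 20, 30, 50, 80, 100];
    assert_eq!(default_gas_price(Ok(dist), 21), 20);
    // No statistics (young or pruned chain): use the miner's value.
    assert_eq!(default_gas_price(Err(()), 21), 21);
}
```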

View File

@ -183,7 +183,8 @@ const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
"frontierCompatibilityModeLimit": "0xffffffffffffffff",
"daoRescueSoftFork": false
}
}
},

View File

@ -115,7 +115,7 @@ impl MinerService for TestMinerService {
}
/// Imports transactions to transaction queue.
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
fn import_transactions<T>(&self, _chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
Vec<Result<TransactionImportResult, Error>>
where T: Fn(&Address) -> AccountDetails {
// lets assume that all txs are valid

View File

@ -21,9 +21,11 @@ use v1::traits::EthSigning;
use v1::helpers::{ConfirmationsQueue, SigningQueue};
use v1::tests::helpers::TestMinerService;
use util::{Address, FixedHash};
use ethcore::client::TestBlockChainClient;
struct EthSigningTester {
pub queue: Arc<ConfirmationsQueue>,
pub client: Arc<TestBlockChainClient>,
pub miner: Arc<TestMinerService>,
pub io: IoHandler,
}
@ -31,12 +33,14 @@ struct EthSigningTester {
impl Default for EthSigningTester {
fn default() -> Self {
let queue = Arc::new(ConfirmationsQueue::default());
let client = Arc::new(TestBlockChainClient::default());
let miner = Arc::new(TestMinerService::default());
let io = IoHandler::new();
io.add_delegate(EthSigningQueueClient::new(&queue, &miner).to_delegate());
io.add_delegate(EthSigningQueueClient::new(&queue, &client, &miner).to_delegate());
EthSigningTester {
queue: queue,
client: client,
miner: miner,
io: io,
}

View File

@ -15,17 +15,12 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::IoHandler;
use v1::{Ethcore, EthcoreClient, EthcoreSet, EthcoreSetClient};
use ethcore::miner::MinerService;
use ethcore::service::SyncMessage;
use v1::{Ethcore, EthcoreClient};
use v1::tests::helpers::TestMinerService;
use v1::helpers::ConfirmationsQueue;
use ethcore::client::{TestBlockChainClient};
use util::numbers::*;
use rustc_serialize::hex::FromHex;
use util::log::RotatingLogger;
use util::network::{NetworkConfiguration, NetworkService};
use util::network_settings::NetworkSettings;
fn miner_service() -> Arc<TestMinerService> {
@ -52,26 +47,16 @@ fn settings() -> Arc<NetworkSettings> {
})
}
fn network_service() -> Arc<NetworkService<SyncMessage>> {
Arc::new(NetworkService::new(NetworkConfiguration::new()).unwrap())
}
fn ethcore_client(client: &Arc<TestBlockChainClient>, miner: &Arc<TestMinerService>) -> EthcoreClient<TestBlockChainClient, TestMinerService> {
EthcoreClient::new(client, miner, logger(), settings())
}
fn ethcore_set_client(miner: &Arc<TestMinerService>, net: &Arc<NetworkService<SyncMessage>>) -> EthcoreSetClient<TestMinerService> {
EthcoreSetClient::new(miner, net)
EthcoreClient::new(client, miner, logger(), settings(), None)
}
#[test]
fn rpc_ethcore_extra_data() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_extraData", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x01020304","id":1}"#;
@ -86,10 +71,8 @@ fn rpc_ethcore_default_extra_data() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_defaultExtraData", "params": [], "id": 1}"#;
let response = format!(r#"{{"jsonrpc":"2.0","result":"0x{}","id":1}}"#, misc::version_data().to_hex());
@ -101,10 +84,8 @@ fn rpc_ethcore_default_extra_data() {
fn rpc_ethcore_gas_floor_target() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_gasFloorTarget", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x3039","id":1}"#;
@ -116,10 +97,8 @@ fn rpc_ethcore_gas_floor_target() {
fn rpc_ethcore_min_gas_price() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_minGasPrice", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x01312d00","id":1}"#;
@ -127,82 +106,16 @@ fn rpc_ethcore_min_gas_price() {
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_ethcore_set_min_gas_price() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.minimal_gas_price(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_set_gas_floor_target() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.gas_floor_target(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_set_extra_data() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.extra_data(), "cd1722f3947def4cf144679da39c4c32bdc35681".from_hex().unwrap());
}
#[test]
fn rpc_ethcore_set_author() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.author(), Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_dev_logs() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let logger = logger();
logger.append("a".to_owned());
logger.append("b".to_owned());
let ethcore = EthcoreClient::new(&client, &miner, logger.clone(), settings()).to_delegate();
let ethcore = EthcoreClient::new(&client, &miner, logger.clone(), settings(), None).to_delegate();
let io = IoHandler::new();
io.add_delegate(ethcore);
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_devLogs", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#;
@ -214,40 +127,21 @@ fn rpc_ethcore_dev_logs() {
fn rpc_ethcore_dev_logs_levels() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_devLogsLevels", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"rpc=trace","id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_ethcore_set_transactions_limit() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setTransactionsLimit", "params":[10240240], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.transactions_limit(), 10_240_240);
}
#[test]
fn rpc_ethcore_transactions_limit() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_transactionsLimit", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":1024,"id":1}"#;
@ -259,10 +153,8 @@ fn rpc_ethcore_transactions_limit() {
fn rpc_ethcore_net_chain() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_netChain", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"testchain","id":1}"#;
@ -274,10 +166,8 @@ fn rpc_ethcore_net_chain() {
fn rpc_ethcore_net_max_peers() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_netMaxPeers", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":25,"id":1}"#;
@ -289,10 +179,8 @@ fn rpc_ethcore_net_max_peers() {
fn rpc_ethcore_net_port() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_netPort", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":30303,"id":1}"#;
@ -304,10 +192,8 @@ fn rpc_ethcore_net_port() {
fn rpc_ethcore_rpc_settings() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_rpcSettings", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":{"enabled":true,"interface":"all","port":8545},"id":1}"#;
@ -319,13 +205,39 @@ fn rpc_ethcore_rpc_settings() {
fn rpc_ethcore_node_name() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_nodeName", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"mynode","id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_ethcore_unsigned_transactions_count() {
let miner = miner_service();
let client = client_service();
let io = IoHandler::new();
let queue = Arc::new(ConfirmationsQueue::default());
let ethcore = EthcoreClient::new(&client, &miner, logger(), settings(), Some(queue)).to_delegate();
io.add_delegate(ethcore);
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() {
let miner = miner_service();
let client = client_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32030,"message":"Trusted Signer is disabled. This API is not available.","data":null},"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
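
The two tests above pin down the behaviour of the new endpoint: with a confirmations queue attached it reports the number of pending requests, without one it answers with the `-32030` "Trusted Signer is disabled" error. A minimal standalone sketch of that branch, using stub types of my own rather than the real jsonrpc_core/Parity ones:

```rust
struct ConfirmationsQueue {
    /// Unconfirmed transaction requests waiting for the signer.
    pending: Vec<String>,
}

#[derive(Debug)]
struct RpcError {
    code: i64,
    message: &'static str,
}

fn unsigned_transactions_count(queue: Option<&ConfirmationsQueue>) -> Result<usize, RpcError> {
    match queue {
        // Signer enabled: report how many requests are waiting.
        Some(q) => Ok(q.pending.len()),
        // Signer disabled: surface the error the second test expects.
        None => Err(RpcError {
            code: -32030,
            message: "Trusted Signer is disabled. This API is not available.",
        }),
    }
}

fn main() {
    let queue = ConfirmationsQueue { pending: Vec::new() };
    assert_eq!(unsigned_transactions_count(Some(&queue)).unwrap(), 0);

    let err = unsigned_transactions_count(None).err().unwrap();
    assert_eq!(err.code, -32030);
    println!("{}", err.message);
}
```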

View File

@ -0,0 +1,107 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::IoHandler;
use v1::{EthcoreSet, EthcoreSetClient};
use ethcore::miner::MinerService;
use ethcore::service::SyncMessage;
use v1::tests::helpers::TestMinerService;
use util::numbers::*;
use util::network::{NetworkConfiguration, NetworkService};
use rustc_serialize::hex::FromHex;
fn miner_service() -> Arc<TestMinerService> {
Arc::new(TestMinerService::default())
}
fn network_service() -> Arc<NetworkService<SyncMessage>> {
Arc::new(NetworkService::new(NetworkConfiguration::new()).unwrap())
}
fn ethcore_set_client(miner: &Arc<TestMinerService>, net: &Arc<NetworkService<SyncMessage>>) -> EthcoreSetClient<TestMinerService> {
EthcoreSetClient::new(miner, net)
}
#[test]
fn rpc_ethcore_set_min_gas_price() {
let miner = miner_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.minimal_gas_price(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_set_gas_floor_target() {
let miner = miner_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.gas_floor_target(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_set_extra_data() {
let miner = miner_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.extra_data(), "cd1722f3947def4cf144679da39c4c32bdc35681".from_hex().unwrap());
}
#[test]
fn rpc_ethcore_set_author() {
let miner = miner_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.author(), Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
}
#[test]
fn rpc_ethcore_set_transactions_limit() {
let miner = miner_service();
let network = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setTransactionsLimit", "params":[10240240], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
assert_eq!(miner.transactions_limit(), 10_240_240);
}

View File

@ -24,4 +24,5 @@ mod web3;
mod personal;
mod personal_signer;
mod ethcore;
mod ethcore_set;
mod rpc;

View File

@ -60,6 +60,10 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
/// Returns distribution of gas price in latest blocks.
fn gas_price_statistics(&self, _: Params) -> Result<Value, Error>;
/// Returns the number of unsigned transactions waiting in the signer queue (if the signer is enabled).
/// Returns an error when the signer is disabled.
fn unsigned_transactions_count(&self, _: Params) -> Result<Value, Error>;
/// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self));
@ -77,6 +81,7 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
delegate.add_method("ethcore_nodeName", Ethcore::node_name);
delegate.add_method("ethcore_defaultExtraData", Ethcore::default_extra_data);
delegate.add_method("ethcore_gasPriceStatistics", Ethcore::gas_price_statistics);
delegate.add_method("ethcore_unsignedTransactionsCount", Ethcore::unsigned_transactions_count);
delegate
}

View File

@ -295,6 +295,10 @@ impl BlockCollection {
let old_subchains: HashSet<_> = { self.heads.iter().cloned().collect() };
for s in self.heads.drain(..) {
let mut h = s.clone();
if !self.blocks.contains_key(&h) {
new_heads.push(h);
continue;
}
loop {
match self.parents.get(&h) {
Some(next) => {
@ -394,7 +398,7 @@ mod test {
assert_eq!(&bc.drain()[..], &blocks[6..16]);
assert_eq!(hashes[15], bc.heads[0]);
bc.insert_headers(headers[16..].to_vec());
bc.insert_headers(headers[15..].to_vec());
bc.drain();
assert!(bc.is_empty());
}
@ -420,5 +424,24 @@ mod test {
assert!(bc.head.is_some());
assert_eq!(hashes[21], bc.heads[0]);
}
#[test]
fn insert_headers_no_gap() {
let mut bc = BlockCollection::new();
assert!(is_empty(&bc));
let client = TestBlockChainClient::new();
let nblocks = 200;
client.add_blocks(nblocks, EachBlockWith::Nothing);
let blocks: Vec<_> = (0 .. nblocks).map(|i| (&client as &BlockChainClient).block(BlockID::Number(i as BlockNumber)).unwrap()).collect();
let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect();
let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect();
let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect();
bc.reset_to(heads);
bc.insert_headers(headers[1..2].to_vec());
assert!(bc.drain().is_empty());
bc.insert_headers(headers[0..1].to_vec());
assert_eq!(bc.drain().len(), 2);
}
}
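
For reference, the drain semantics that `insert_headers_no_gap` exercises reduce to a simple rule: nothing is drained until the collection is contiguous from the first expected block. A toy model with a stand-in type of my own (not the real `BlockCollection`):

```rust
use std::collections::BTreeMap;

/// Toy collection: blocks are keyed by number and only drained while they
/// form a contiguous run starting at `next`.
struct Collection {
    next: u64,
    pending: BTreeMap<u64, &'static str>,
}

impl Collection {
    fn insert(&mut self, number: u64, block: &'static str) {
        self.pending.insert(number, block);
    }

    fn drain(&mut self) -> Vec<&'static str> {
        let mut out = Vec::new();
        loop {
            let key = self.next;
            match self.pending.remove(&key) {
                Some(block) => {
                    out.push(block);
                    self.next += 1;
                }
                None => break,
            }
        }
        out
    }
}

fn main() {
    let mut bc = Collection { next: 0, pending: BTreeMap::new() };
    bc.insert(1, "block #1");
    assert!(bc.drain().is_empty()); // gap at #0, nothing can be drained yet
    bc.insert(0, "block #0");
    assert_eq!(bc.drain().len(), 2); // gap filled, both blocks drain
}
```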

View File

@ -100,6 +100,7 @@ use io::SyncIo;
use time;
use super::SyncConfig;
use blocks::BlockCollection;
use rand::{thread_rng, Rng};
known_heap_size!(0, PeerInfo);
@ -308,7 +309,6 @@ impl ChainSync {
}
self.syncing_difficulty = From::from(0u64);
self.state = SyncState::Idle;
self.blocks.clear();
self.active_peers = self.peers.keys().cloned().collect();
}
@ -393,7 +393,7 @@ impl ChainSync {
self.clear_peer_download(peer_id);
let expected_hash = self.peers.get(&peer_id).and_then(|p| p.asking_hash);
let expected_asking = if self.state == SyncState::ChainHead { PeerAsking::Heads } else { PeerAsking::BlockHeaders };
if !self.reset_peer_asking(peer_id, expected_asking) {
if !self.reset_peer_asking(peer_id, expected_asking) || expected_hash.is_none() {
trace!(target: "sync", "Ignored unexpected headers");
self.continue_sync(io);
return Ok(());
@ -533,10 +533,6 @@ impl ChainSync {
let header_rlp = try!(block_rlp.at(0));
let h = header_rlp.as_raw().sha3();
trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
if self.state != SyncState::Idle {
trace!(target: "sync", "NewBlock ignored while seeking");
return Ok(());
}
let header: BlockHeader = try!(header_rlp.as_val());
let mut unknown = false;
{
@ -544,46 +540,45 @@ impl ChainSync {
peer.latest_hash = header.hash();
peer.latest_number = Some(header.number());
}
if header.number <= self.last_imported_block + 1 {
match io.chain().import_block(block_rlp.as_raw().to_vec()) {
Err(Error::Import(ImportError::AlreadyInChain)) => {
trace!(target: "sync", "New block already in chain {:?}", h);
},
Err(Error::Import(ImportError::AlreadyQueued)) => {
trace!(target: "sync", "New block already queued {:?}", h);
},
Ok(_) => {
if header.number == self.last_imported_block + 1 {
self.last_imported_block = header.number;
self.last_imported_hash = header.hash();
}
trace!(target: "sync", "New block queued {:?} ({})", h, header.number);
},
Err(Error::Block(BlockError::UnknownParent(p))) => {
unknown = true;
trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h);
},
Err(e) => {
debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
io.disable_peer(peer_id);
}
};
}
else {
unknown = true;
}
if unknown {
trace!(target: "sync", "New unknown block {:?}", h);
//TODO: handle too many unknown blocks
let difficulty: U256 = try!(r.val_at(1));
if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
if peer.difficulty.map_or(true, |pd| difficulty > pd) {
//self.state = SyncState::ChainHead;
peer.difficulty = Some(difficulty);
trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
match io.chain().import_block(block_rlp.as_raw().to_vec()) {
Err(Error::Import(ImportError::AlreadyInChain)) => {
trace!(target: "sync", "New block already in chain {:?}", h);
},
Err(Error::Import(ImportError::AlreadyQueued)) => {
trace!(target: "sync", "New block already queued {:?}", h);
},
Ok(_) => {
if header.number == self.last_imported_block + 1 {
self.last_imported_block = header.number;
self.last_imported_hash = header.hash();
}
trace!(target: "sync", "New block queued {:?} ({})", h, header.number);
},
Err(Error::Block(BlockError::UnknownParent(p))) => {
unknown = true;
trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h);
},
Err(e) => {
debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
io.disable_peer(peer_id);
}
};
if unknown {
if self.state != SyncState::Idle {
trace!(target: "sync", "NewBlock ignored while seeking");
} else {
trace!(target: "sync", "New unknown block {:?}", h);
//TODO: handle too many unknown blocks
let difficulty: U256 = try!(r.val_at(1));
if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
if peer.difficulty.map_or(true, |pd| difficulty > pd) {
//self.state = SyncState::ChainHead;
peer.difficulty = Some(difficulty);
trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
}
}
self.sync_peer(io, peer_id, true);
}
self.sync_peer(io, peer_id, true);
}
Ok(())
}
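
The restructured handler above always attempts the import first and only gates the unknown-parent follow-up on the sync state. A condensed standalone sketch of that control flow, with stub enums of my own rather than the real `ChainSync` types:

```rust
/// Outcome of handing the new block to the client's import queue.
#[allow(dead_code)]
enum Import {
    Queued,
    AlreadyKnown,
    UnknownParent,
    Bad,
}

#[derive(PartialEq)]
enum SyncState {
    Idle,
    Blocks,
}

fn on_new_block(state: &SyncState, import: Import) -> &'static str {
    // The import is always attempted first (previously the whole packet was
    // dropped whenever the node was not idle).
    match import {
        Import::Queued | Import::AlreadyKnown => "block accepted",
        Import::Bad => "peer disabled",
        Import::UnknownParent => {
            // Only the unknown-parent follow-up is gated on the sync state.
            if *state == SyncState::Idle {
                "peer needs syncing"
            } else {
                "ignored while seeking"
            }
        }
    }
}

fn main() {
    assert_eq!(on_new_block(&SyncState::Blocks, Import::UnknownParent), "ignored while seeking");
    assert_eq!(on_new_block(&SyncState::Idle, Import::UnknownParent), "peer needs syncing");
    assert_eq!(on_new_block(&SyncState::Idle, Import::Queued), "block accepted");
}
```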
@ -661,7 +656,7 @@ impl ChainSync {
/// Resume downloading
fn continue_sync(&mut self, io: &mut SyncIo) {
let mut peers: Vec<(PeerId, U256)> = self.peers.iter().map(|(k, p)| (*k, p.difficulty.unwrap_or_else(U256::zero))).collect();
peers.sort_by(|&(_, d1), &(_, d2)| d1.cmp(&d2).reverse()); //TODO: sort by rating
thread_rng().shuffle(&mut peers); //TODO: sort by rating
trace!(target: "sync", "Syncing with {}/{} peers", self.active_peers.len(), peers.len());
for (p, _) in peers {
if self.active_peers.contains(&p) {
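
The hunk above swaps the difficulty-ordered peer list for a random shuffle. A small standalone sketch, assuming the rand 0.3-era `thread_rng`/`Rng::shuffle` API that this file already imports:

```rust
extern crate rand;

use rand::{thread_rng, Rng};

fn main() {
    // Peer ids paired with their total difficulty; previously this list was
    // sorted by difficulty before issuing requests.
    let mut peers: Vec<(u32, u64)> = vec![(1, 300), (2, 100), (3, 200)];
    // Randomize the order instead, so requests are spread across peers.
    thread_rng().shuffle(&mut peers);
    println!("request order: {:?}", peers);
}
```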
@ -687,7 +682,11 @@ impl ChainSync {
}
/// Find something to do for a peer. Called for a new peer or when a peer is done with its task.
fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) {
if !self.active_peers.contains(&peer_id) {
trace!(target: "sync", "Skipping deactivated peer");
return;
}
let (peer_latest, peer_difficulty) = {
let peer = self.peers.get_mut(&peer_id).unwrap();
if peer.asking != PeerAsking::Nothing {

View File

@ -44,8 +44,8 @@
//! let mut service = NetworkService::new(NetworkConfiguration::new()).unwrap();
//! service.start().unwrap();
//! let dir = env::temp_dir();
//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, Arc::new(Miner::default()), service.io().channel()).unwrap();
//! let miner = Miner::new(false, ethereum::new_frontier());
//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(true), &dir, Arc::new(Miner::default()), service.io().channel()).unwrap();
//! let miner = Miner::new(false, ethereum::new_frontier(true));
//! let sync = EthSync::new(SyncConfig::default(), client);
//! EthSync::register(&mut service, sync);
//! }

View File

@ -425,12 +425,16 @@ macro_rules! uint_overflowing_mul_reg {
let (c_u, overflow_u) = mul_u32(a, b_u, c_l >> 32);
ret[i + j] = (c_l & 0xFFFFFFFF) + (c_u << 32);
// Only single overflow possible here
let carry = (c_u >> 32) + (overflow_u << 32) + overflow_l + carry2;
let (carry, o) = carry.overflowing_add(ret[i + j + 1]);
// No overflow here
let res = (c_u >> 32) + (overflow_u << 32);
// possible overflows
let (res, o1) = res.overflowing_add(overflow_l);
let (res, o2) = res.overflowing_add(carry2);
let (res, o3) = res.overflowing_add(ret[i + j + 1]);
ret[i + j + 1] = res;
ret[i + j + 1] = carry;
carry2 = o as u64;
// Only a single overflow is possible here
carry2 = (o1 | o2 | o3) as u64;
}
}
@ -1305,12 +1309,16 @@ impl U256 {
let (c_u, overflow_u) = mul_u32(a, b_u, c_l >> 32);
ret[i + j] = (c_l & 0xFFFFFFFF) + (c_u << 32);
// Only single overflow possible here
let carry = (c_u >> 32) + (overflow_u << 32) + overflow_l + carry2;
let (carry, o) = carry.overflowing_add(ret[i + j + 1]);
// No overflow here
let res = (c_u >> 32) + (overflow_u << 32);
// possible overflows
let (res, o1) = res.overflowing_add(overflow_l);
let (res, o2) = res.overflowing_add(carry2);
let (res, o3) = res.overflowing_add(ret[i + j + 1]);
ret[i + j + 1] = res;
ret[i + j + 1] = carry;
carry2 = o as u64;
// Only a single overflow is possible here
carry2 = (o1 | o2 | o3) as u64;
}
}
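
The rewritten carry propagation above splits the old single sum into explicit `overflowing_add` steps so that wraps are detected. A standalone toy run (my own example values, not the U256 internals) showing the pattern: the first sum cannot wrap, the later additions can, and their overflow flags are OR-ed into the carry for the next limb, as in the patch:

```rust
fn main() {
    // c_u >> 32 is strictly below 2^32, and overflow_u << 32 has its low
    // 32 bits cleared (assuming, as in the surrounding code, that overflow_u
    // is the 32-bit high word of a 32x32 multiply), so this sum never wraps.
    let c_u: u64 = u64::max_value();
    let overflow_u: u64 = 0xFFFF_FFFF;
    let res = (c_u >> 32) + (overflow_u << 32);

    // The remaining additions can wrap; each overflow flag is recorded and
    // collapsed into the carry for the next limb, mirroring the patch. With
    // these values only the first addition wraps.
    let overflow_l: u64 = 1;
    let carry2: u64 = 1;
    let next_limb: u64 = 5;

    let (res, o1) = res.overflowing_add(overflow_l);
    let (res, o2) = res.overflowing_add(carry2);
    let (res, o3) = res.overflowing_add(next_limb);
    let carry_out = (o1 | o2 | o3) as u64;
    println!("limb = {:#x}, carry into the next limb = {}", res, carry_out);
}
```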

View File

@ -109,6 +109,7 @@ impl Database {
/// Open database file. Creates if it does not exist.
pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
let mut opts = Options::new();
try!(opts.set_parsed_options("rate_limiter_bytes_per_sec=256000000"));
opts.set_max_open_files(config.max_open_files);
opts.create_if_missing(true);
opts.set_use_fsync(false);

View File

@ -191,7 +191,7 @@ pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'sta
sessions: Arc<RwLock<Slab<SharedSession>>>,
session: Option<SharedSession>,
session_id: Option<StreamToken>,
reserved_peers: &'s HashSet<NodeId>,
_reserved_peers: &'s HashSet<NodeId>,
}
impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
@ -207,7 +207,7 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
session_id: id,
session: session,
sessions: sessions,
reserved_peers: reserved_peers,
_reserved_peers: reserved_peers,
}
}
@ -837,9 +837,9 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
let mut s = session.lock().unwrap();
if !s.expired() {
if s.is_ready() {
self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
for (p, _) in self.handlers.read().unwrap().iter() {
if s.have_capability(p) {
self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
to_disconnect.push(p);
}
}