Merge branch 'master' into ui-2

Jaco Greeff 2017-10-04 12:31:09 +02:00
commit 55b0b09d6a
164 changed files with 10610 additions and 2896 deletions

Cargo.lock generated

File diff suppressed because it is too large

README.md

@@ -1,4 +1,4 @@
-# [Parity](https://parity.io/parity.html) - fast, light, and robust Ethereum client
+# [Parity](https://parity.io/) - fast, light, and robust Ethereum client
 [![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master)
 [![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity)

ethash/Cargo.toml

@@ -11,6 +11,8 @@ hash = { path = "../util/hash" }
 primal = "0.2.3"
 parking_lot = "0.4"
 crunchy = "0.1.0"
+memmap = "0.5.2"
+either = "1.0.0"

 [features]
 benches = []

ethash/src/cache.rs Normal file

@ -0,0 +1,352 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use compute::Light;
use either::Either;
use keccak::{H256, keccak_512};
use memmap::{Mmap, Protection};
use parking_lot::Mutex;
use seed_compute::SeedHashCompute;
use shared::{ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS, Node, epoch, get_cache_size, to_hex};
use std::borrow::Cow;
use std::fs;
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use std::slice;
use std::sync::Arc;
type Cache = Either<Vec<Node>, Mmap>;
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum OptimizeFor {
Cpu,
Memory,
}
impl Default for OptimizeFor {
fn default() -> Self {
OptimizeFor::Cpu
}
}
fn byte_size(cache: &Cache) -> usize {
use self::Either::{Left, Right};
match *cache {
Left(ref vec) => vec.len() * NODE_BYTES,
Right(ref mmap) => mmap.len(),
}
}
fn new_buffer(path: &Path, num_nodes: usize, ident: &H256, optimize_for: OptimizeFor) -> Cache {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(),
};
memmap.map(Either::Right).unwrap_or_else(|| {
Either::Left(make_memory_cache(num_nodes, ident))
})
}
#[derive(Clone)]
pub struct NodeCacheBuilder {
// TODO: Remove this locking and just use an `Rc`?
seedhash: Arc<Mutex<SeedHashCompute>>,
optimize_for: OptimizeFor,
}
// TODO: Abstract the "optimize for" logic
pub struct NodeCache {
builder: NodeCacheBuilder,
cache_dir: Cow<'static, Path>,
cache_path: PathBuf,
epoch: u64,
cache: Cache,
}
impl NodeCacheBuilder {
pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light {
Light::new_with_builder(self, cache_dir, block_number)
}
pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result<Light> {
Light::from_file_with_builder(self, cache_dir, block_number)
}
pub fn new<T: Into<Option<OptimizeFor>>>(optimize_for: T) -> Self {
NodeCacheBuilder {
seedhash: Arc::new(Mutex::new(SeedHashCompute::new())),
optimize_for: optimize_for.into().unwrap_or_default(),
}
}
fn block_number_to_ident(&self, block_number: u64) -> H256 {
self.seedhash.lock().hash_block_number(block_number)
}
fn epoch_to_ident(&self, epoch: u64) -> H256 {
self.seedhash.lock().hash_epoch(epoch)
}
pub fn from_file<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> io::Result<NodeCache> {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let path = cache_path(cache_dir.as_ref(), &ident);
let cache = cache_from_path(&path, self.optimize_for)?;
let expected_cache_size = get_cache_size(block_number);
if byte_size(&cache) == expected_cache_size {
Ok(NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir,
cache_path: path,
cache: cache,
})
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Node cache is of incorrect size",
))
}
}
pub fn new_cache<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> NodeCache {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let cache_size = get_cache_size(block_number);
// We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned
// value with the current implementation. If the implementation changes, CI will catch it.
debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
let path = cache_path(cache_dir.as_ref(), &ident);
let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for);
NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir.into(),
cache_path: path,
cache: nodes,
}
}
}
impl NodeCache {
pub fn cache_path(&self) -> &Path {
&self.cache_path
}
pub fn flush(&mut self) -> io::Result<()> {
if let Some(last) = self.epoch.checked_sub(2).map(|ep| {
cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep))
})
{
fs::remove_file(last).unwrap_or_else(|error| match error.kind() {
io::ErrorKind::NotFound => (),
_ => warn!("Error removing stale DAG cache: {:?}", error),
});
}
consume_cache(&mut self.cache, &self.cache_path)
}
}
fn make_memmapped_cache(path: &Path, num_nodes: usize, ident: &H256) -> io::Result<Mmap> {
use std::fs::OpenOptions;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len((num_nodes * NODE_BYTES) as _)?;
let mut memmap = Mmap::open(&file, Protection::ReadWrite)?;
unsafe { initialize_memory(memmap.mut_ptr() as *mut Node, num_nodes, ident) };
Ok(memmap)
}
fn make_memory_cache(num_nodes: usize, ident: &H256) -> Vec<Node> {
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
unsafe {
initialize_memory(nodes.as_mut_ptr(), num_nodes, ident);
nodes.set_len(num_nodes);
}
nodes
}
fn cache_path<'a, P: Into<Cow<'a, Path>>>(path: P, ident: &H256) -> PathBuf {
let mut buf = path.into().into_owned();
buf.push(to_hex(ident));
buf
}
fn consume_cache(cache: &mut Cache, path: &Path) -> io::Result<()> {
use std::fs::OpenOptions;
match *cache {
Either::Left(ref mut vec) => {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
let buf = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES)
};
file.write_all(buf).map(|_| ())
}
Either::Right(ref mmap) => {
mmap.flush()
}
}
}
fn cache_from_path(path: &Path, optimize_for: OptimizeFor) -> io::Result<Cache> {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => Mmap::open_path(path, Protection::ReadWrite).ok(),
};
memmap.map(Either::Right).ok_or(()).or_else(|_| {
read_from_path(path).map(Either::Left)
})
}
fn read_from_path(path: &Path) -> io::Result<Vec<Node>> {
use std::fs::File;
use std::mem;
let mut file = File::open(path)?;
let mut nodes: Vec<u8> = Vec::with_capacity(file.metadata().map(|m| m.len() as _).unwrap_or(
NODE_BYTES * 1_000_000,
));
file.read_to_end(&mut nodes)?;
nodes.shrink_to_fit();
if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 {
return Err(io::Error::new(
io::ErrorKind::Other,
"Node cache is not a multiple of node size",
));
}
let out: Vec<Node> = unsafe {
Vec::from_raw_parts(
nodes.as_mut_ptr() as *mut _,
nodes.len() / NODE_BYTES,
nodes.capacity() / NODE_BYTES,
)
};
mem::forget(nodes);
Ok(out)
}
impl AsRef<[Node]> for NodeCache {
fn as_ref(&self) -> &[Node] {
match self.cache {
Either::Left(ref vec) => vec,
Either::Right(ref mmap) => unsafe {
let bytes = mmap.ptr();
// This isn't a safety issue, so we can keep this a debug lint. We don't care about
// people manually messing with the files unless it can cause unsafety, but if we're
// generating incorrect files then we want to catch that in CI.
debug_assert_eq!(mmap.len() % NODE_BYTES, 0);
slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES)
},
}
}
}
// This takes a raw pointer and a counter because `memory` may be uninitialized. `memory` _must_ be
// a pointer to the beginning of an allocated but possibly-uninitialized block of
// `num_nodes * NODE_BYTES` bytes
//
// We have to use raw pointers to read/write uninit, using "normal" indexing causes LLVM to freak
// out. It counts as a read and causes all writes afterwards to be elided. Yes, really. I know, I
// want to refactor this to use less `unsafe` as much as the next rustacean.
unsafe fn initialize_memory(memory: *mut Node, num_nodes: usize, ident: &H256) {
let dst = memory as *mut u8;
debug_assert_eq!(ident.len(), 32);
keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len());
for i in 1..num_nodes {
// We use raw pointers here, see above
let dst = memory.offset(i as _) as *mut u8;
let src = memory.offset(i as isize - 1) as *mut u8;
keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES);
}
// Now this is initialized, we can treat it as a slice.
let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes);
// For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then
// these have got out of sync! Don't let this happen!
debug_assert_eq!(NODE_DWORDS, 8);
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let data_idx = (num_nodes - 1 + i) % num_nodes;
let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes;
let data = {
let mut data: Node = nodes.get_unchecked(data_idx).clone();
let rhs: &Node = nodes.get_unchecked(idx);
unroll! {
for w in 0..8 {
*data.as_dwords_mut().get_unchecked_mut(w) ^=
*rhs.as_dwords().get_unchecked(w);
}
}
data
};
keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
}
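
For orientation, here is a minimal sketch of how calling code is expected to drive this builder API (the helper name `light_for_block` and the fallback flow are illustrative assumptions; the real consumer is `EthashManager` in lib.rs further down):

use std::path::Path;

// Assumed to live inside the ethash crate, where `cache::NodeCacheBuilder` and
// `compute::Light` are visible.
fn light_for_block(cache_dir: &Path, block_number: u64) -> Light {
    // `None` picks the default, OptimizeFor::Cpu, i.e. a plain in-memory Vec<Node>.
    let builder = NodeCacheBuilder::new(None);

    // Reuse a previously flushed cache file if one exists and has the expected size;
    // otherwise generate a fresh cache for the block's epoch.
    builder
        .light_from_file(cache_dir, block_number)
        .unwrap_or_else(|_| builder.light(cache_dir, block_number))
}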

ethash/src/compute.rs

@ -19,30 +19,16 @@
// TODO: fix endianess for big endian // TODO: fix endianess for big endian
use primal::is_prime; use keccak::{keccak_512, keccak_256, H256};
use std::cell::Cell; use cache::{NodeCache, NodeCacheBuilder};
use seed_compute::SeedHashCompute;
use shared::*;
use std::io;
use std::mem; use std::mem;
use std::path::Path;
use std::ptr; use std::ptr;
use hash;
use std::slice;
use std::path::{Path, PathBuf};
use std::io::{self, Read, Write};
use std::fs::{self, File};
use parking_lot::Mutex;
pub const ETHASH_EPOCH_LENGTH: u64 = 30000;
pub const ETHASH_CACHE_ROUNDS: usize = 3;
pub const ETHASH_MIX_BYTES: usize = 128;
pub const ETHASH_ACCESSES: usize = 64;
pub const ETHASH_DATASET_PARENTS: u32 = 256;
const DATASET_BYTES_INIT: u64 = 1 << 30;
const DATASET_BYTES_GROWTH: u64 = 1 << 23;
const CACHE_BYTES_INIT: u64 = 1 << 24;
const CACHE_BYTES_GROWTH: u64 = 1 << 17;
const NODE_WORDS: usize = 64 / 4;
const NODE_BYTES: usize = 64;
const MIX_WORDS: usize = ETHASH_MIX_BYTES / 4; const MIX_WORDS: usize = ETHASH_MIX_BYTES / 4;
const MIX_NODES: usize = MIX_WORDS / NODE_WORDS; const MIX_NODES: usize = MIX_WORDS / NODE_WORDS;
const FNV_PRIME: u32 = 0x01000193; const FNV_PRIME: u32 = 0x01000193;
@ -55,48 +41,24 @@ pub struct ProofOfWork {
pub mix_hash: H256, pub mix_hash: H256,
} }
struct Node {
bytes: [u8; NODE_BYTES],
}
impl Default for Node {
fn default() -> Self {
Node { bytes: [0u8; NODE_BYTES] }
}
}
impl Clone for Node {
fn clone(&self) -> Self {
Node { bytes: *&self.bytes }
}
}
impl Node {
#[inline]
fn as_words(&self) -> &[u32; NODE_WORDS] {
unsafe { mem::transmute(&self.bytes) }
}
#[inline]
fn as_words_mut(&mut self) -> &mut [u32; NODE_WORDS] {
unsafe { mem::transmute(&mut self.bytes) }
}
}
pub type H256 = [u8; 32];
pub struct Light { pub struct Light {
cache_dir: PathBuf,
block_number: u64, block_number: u64,
cache: Vec<Node>, cache: NodeCache,
seed_compute: Mutex<SeedHashCompute>,
} }
/// Light cache structure /// Light cache structure
impl Light { impl Light {
/// Create a new light cache for a given block number pub fn new_with_builder(
pub fn new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light { builder: &NodeCacheBuilder,
light_new(cache_dir, block_number) cache_dir: &Path,
block_number: u64,
) -> Self {
let cache = builder.new_cache(cache_dir.to_path_buf(), block_number);
Light {
block_number: block_number,
cache: cache,
}
} }
/// Calculate the light boundary data /// Calculate the light boundary data
@ -106,107 +68,25 @@ impl Light {
light_compute(self, header_hash, nonce) light_compute(self, header_hash, nonce)
} }
pub fn file_path<T: AsRef<Path>>(cache_dir: T, seed_hash: H256) -> PathBuf { pub fn from_file_with_builder(
let mut cache_dir = cache_dir.as_ref().to_path_buf(); builder: &NodeCacheBuilder,
cache_dir.push(to_hex(&seed_hash)); cache_dir: &Path,
cache_dir block_number: u64,
} ) -> io::Result<Self> {
let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?;
pub fn from_file<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> io::Result<Light> {
let seed_compute = SeedHashCompute::new();
let path = Light::file_path(&cache_dir, seed_compute.get_seedhash(block_number));
let mut file = File::open(path)?;
let cache_size = get_cache_size(block_number);
if file.metadata()?.len() != cache_size as u64 {
return Err(io::Error::new(io::ErrorKind::Other, "Cache file size mismatch"));
}
let num_nodes = cache_size / NODE_BYTES;
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
unsafe { nodes.set_len(num_nodes) };
let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
file.read_exact(buf)?;
Ok(Light { Ok(Light {
block_number, block_number: block_number,
cache_dir: cache_dir.as_ref().to_path_buf(), cache: cache,
cache: nodes,
seed_compute: Mutex::new(seed_compute),
}) })
} }
pub fn to_file(&self) -> io::Result<PathBuf> { pub fn to_file(&mut self) -> io::Result<&Path> {
let seed_compute = self.seed_compute.lock(); self.cache.flush()?;
let path = Light::file_path(&self.cache_dir, seed_compute.get_seedhash(self.block_number)); Ok(self.cache.cache_path())
if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
let deprecated = Light::file_path(
&self.cache_dir,
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2)
);
if deprecated.exists() {
debug!(target: "ethash", "removing: {:?}", &deprecated);
fs::remove_file(deprecated)?;
}
}
fs::create_dir_all(path.parent().unwrap())?;
let mut file = File::create(&path)?;
let cache_size = self.cache.len() * NODE_BYTES;
let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) };
file.write(buf)?;
Ok(path)
} }
} }
pub struct SeedHashCompute { pub fn slow_hash_block_number(block_number: u64) -> H256 {
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
}
impl SeedHashCompute {
#[inline]
pub fn new() -> SeedHashCompute {
SeedHashCompute {
prev_epoch: Cell::new(0),
prev_seedhash: Cell::new([0u8; 32]),
}
}
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
pub fn get_seedhash(&self, block_number: u64) -> H256 {
let epoch = block_number / ETHASH_EPOCH_LENGTH;
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(self.prev_seedhash.get(), self.prev_epoch.get(), epoch);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
unsafe { hash::keccak_256(hash[..].as_mut_ptr(), 32, hash[..].as_ptr(), 32) };
}
hash
}
}
pub fn slow_get_seedhash(block_number: u64) -> H256 {
SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH) SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH)
} }
@ -214,34 +94,6 @@ fn fnv_hash(x: u32, y: u32) -> u32 {
return x.wrapping_mul(FNV_PRIME) ^ y; return x.wrapping_mul(FNV_PRIME) ^ y;
} }
fn keccak_512(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
fn keccak_512_inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
fn get_cache_size(block_number: u64) -> usize {
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
}
fn get_data_size(block_number: u64) -> usize {
let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
}
/// Difficulty quick check for POW preverification /// Difficulty quick check for POW preverification
/// ///
/// `header_hash` The hash of the header /// `header_hash` The hash of the header
@ -261,12 +113,12 @@ pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256) ->
ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32); ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32);
ptr::copy_nonoverlapping(mem::transmute(&nonce), buf[32..].as_mut_ptr(), 8); ptr::copy_nonoverlapping(mem::transmute(&nonce), buf[32..].as_mut_ptr(), 8);
hash::keccak_512(buf.as_mut_ptr(), 64, buf.as_ptr(), 40); keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40);
ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32); ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32);
// This is initialized in `keccak_256` // This is initialized in `keccak_256`
let mut hash: [u8; 32] = mem::uninitialized(); let mut hash: [u8; 32] = mem::uninitialized();
hash::keccak_256(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len()); keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len());
hash hash
} }
@ -324,11 +176,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// leaving it fully initialized. // leaving it fully initialized.
let mut out: [u8; NODE_BYTES] = mem::uninitialized(); let mut out: [u8; NODE_BYTES] = mem::uninitialized();
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len());
header_hash.as_ptr(),
out.as_mut_ptr(),
header_hash.len(),
);
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
mem::transmute(&nonce), mem::transmute(&nonce),
out[header_hash.len()..].as_mut_ptr(), out[header_hash.len()..].as_mut_ptr(),
@ -336,11 +184,11 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
); );
// compute keccak-512 hash and replicate across mix // compute keccak-512 hash and replicate across mix
hash::keccak_512( keccak_512::unchecked(
out.as_mut_ptr(), out.as_mut_ptr(),
NODE_BYTES, NODE_BYTES,
out.as_ptr(), out.as_ptr(),
header_hash.len() + mem::size_of::<u64>() header_hash.len() + mem::size_of::<u64>(),
); );
Node { bytes: out } Node { bytes: out }
@ -354,7 +202,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
let page_size = 4 * MIX_WORDS; let page_size = 4 * MIX_WORDS;
let num_full_pages = (full_size / page_size) as u32; let num_full_pages = (full_size / page_size) as u32;
// deref once for better performance // deref once for better performance
let cache: &[Node] = &light.cache; let cache: &[Node] = light.cache.as_ref();
let first_val = buf.half_mix.as_words()[0]; let first_val = buf.half_mix.as_words()[0];
debug_assert_eq!(MIX_NODES, 2); debug_assert_eq!(MIX_NODES, 2);
@ -364,14 +212,10 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
let index = { let index = {
// This is trivially safe, but does not work on big-endian. The safety of this is // This is trivially safe, but does not work on big-endian. The safety of this is
// asserted in debug builds (see the definition of `make_const_array!`). // asserted in debug builds (see the definition of `make_const_array!`).
let mix_words: &mut [u32; MIX_WORDS] = unsafe { let mix_words: &mut [u32; MIX_WORDS] =
make_const_array!(MIX_WORDS, &mut mix) unsafe { make_const_array!(MIX_WORDS, &mut mix) };
};
fnv_hash( fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages
first_val ^ i,
mix_words[i as usize % MIX_WORDS]
) % num_full_pages
}; };
unroll! { unroll! {
@ -403,9 +247,8 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE- // times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE-
// ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on // ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on
// big-endian arches like mips. // big-endian arches like mips.
let mut compress: &mut [u32; MIX_WORDS / 4] = unsafe { let compress: &mut [u32; MIX_WORDS / 4] =
make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) };
};
// Compress mix // Compress mix
debug_assert_eq!(MIX_WORDS / 4, 8); debug_assert_eq!(MIX_WORDS / 4, 8);
@ -430,7 +273,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// We overwrite the second half since `keccak_256` has an internal buffer and so allows // We overwrite the second half since `keccak_256` has an internal buffer and so allows
// overlapping arrays as input. // overlapping arrays as input.
let write_ptr: *mut u8 = mem::transmute(&mut buf.compress_bytes); let write_ptr: *mut u8 = mem::transmute(&mut buf.compress_bytes);
hash::keccak_256( keccak_256::unchecked(
write_ptr, write_ptr,
buf.compress_bytes.len(), buf.compress_bytes.len(),
read_ptr, read_ptr,
@ -439,25 +282,21 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
buf.compress_bytes buf.compress_bytes
}; };
ProofOfWork { ProofOfWork { mix_hash: mix_hash, value: value }
mix_hash: mix_hash,
value: value,
}
} }
// TODO: Use the `simd` crate
fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node { fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
let num_parent_nodes = cache.len(); let num_parent_nodes = cache.len();
let mut ret = cache[node_index as usize % num_parent_nodes].clone(); let mut ret = cache[node_index as usize % num_parent_nodes].clone();
ret.as_words_mut()[0] ^= node_index; ret.as_words_mut()[0] ^= node_index;
keccak_512_inplace(&mut ret.bytes); keccak_512::inplace(ret.as_bytes_mut());
debug_assert_eq!(NODE_WORDS, 16); debug_assert_eq!(NODE_WORDS, 16);
for i in 0..ETHASH_DATASET_PARENTS as u32 { for i in 0..ETHASH_DATASET_PARENTS as u32 {
let parent_index = fnv_hash( let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS]) %
node_index ^ i, num_parent_nodes as u32;
ret.as_words()[i as usize % NODE_WORDS],
) % num_parent_nodes as u32;
let parent = &cache[parent_index as usize]; let parent = &cache[parent_index as usize];
unroll! { unroll! {
@ -467,69 +306,18 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
} }
} }
keccak_512_inplace(&mut ret.bytes); keccak_512::inplace(ret.as_bytes_mut());
ret ret
} }
fn light_new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light { #[cfg(test)]
let seed_compute = SeedHashCompute::new(); mod test {
let seedhash = seed_compute.get_seedhash(block_number); use super::*;
let cache_size = get_cache_size(block_number); use std::fs;
assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size"); #[test]
let num_nodes = cache_size / NODE_BYTES; fn test_get_cache_size() {
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
unsafe {
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
nodes.set_len(num_nodes);
keccak_512(&seedhash[0..32], &mut nodes.get_unchecked_mut(0).bytes);
for i in 1..num_nodes {
hash::keccak_512(nodes.get_unchecked_mut(i).bytes.as_mut_ptr(), NODE_BYTES, nodes.get_unchecked(i - 1).bytes.as_ptr(), NODE_BYTES);
}
debug_assert_eq!(NODE_WORDS, 16);
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let idx = *nodes.get_unchecked_mut(i).as_words().get_unchecked(0) as usize % num_nodes;
let mut data = nodes.get_unchecked((num_nodes - 1 + i) % num_nodes).clone();
unroll! {
for w in 0..16 {
*data.as_words_mut().get_unchecked_mut(w) ^= *nodes.get_unchecked(idx).as_words().get_unchecked(w);
}
}
keccak_512(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
}
Light {
block_number,
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: nodes,
seed_compute: Mutex::new(seed_compute),
}
}
static CHARS: &'static [u8] = b"0123456789abcdef";
fn to_hex(bytes: &[u8]) -> String {
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
}
unsafe { String::from_utf8_unchecked(v) }
}
#[test]
fn test_get_cache_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(16776896usize, get_cache_size(0)); assert_eq!(16776896usize, get_cache_size(0));
assert_eq!(16776896usize, get_cache_size(1)); assert_eq!(16776896usize, get_cache_size(1));
@ -539,10 +327,10 @@ fn test_get_cache_size() {
assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH)); assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH)); assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1)); assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1));
} }
#[test] #[test]
fn test_get_data_size() { fn test_get_data_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes // https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(1073739904usize, get_data_size(0)); assert_eq!(1073739904usize, get_data_size(0));
assert_eq!(1073739904usize, get_data_size(1)); assert_eq!(1073739904usize, get_data_size(1));
@ -551,75 +339,74 @@ fn test_get_data_size() {
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1)); assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH)); assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH)); assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH));
} }
#[test] #[test]
fn test_difficulty_test() { fn test_difficulty_test() {
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72]; let hash = [
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d]; 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let nonce = 0xd7b3ac70a301a249; let nonce = 0xd7b3ac70a301a249;
let boundary_good = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84]; let boundary_good = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash)[..], boundary_good[..]); assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash)[..], boundary_good[..]);
let boundary_bad = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84]; let boundary_bad = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert!(quick_get_difficulty(&hash, nonce, &mix_hash)[..] != boundary_bad[..]); assert!(quick_get_difficulty(&hash, nonce, &mix_hash)[..] != boundary_bad[..]);
} }
#[test] #[test]
fn test_light_compute() { fn test_light_compute() {
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72]; let hash = [
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d]; 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
let boundary = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84]; 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let boundary = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
let nonce = 0xd7b3ac70a301a249; let nonce = 0xd7b3ac70a301a249;
// difficulty = 0x085657254bd9u64; // difficulty = 0x085657254bd9u64;
let light = Light::new(&::std::env::temp_dir(), 486382); let light = NodeCacheBuilder::new(None).light(&::std::env::temp_dir(), 486382);
let result = light_compute(&light, &hash, nonce); let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]); assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]); assert_eq!(result.value[..], boundary[..]);
} }
#[test] #[test]
fn test_seed_compute_once() { fn test_drop_old_data() {
let seed_compute = SeedHashCompute::new();
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::new();
assert_eq!(seed_compute.get_seedhash(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::new();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.get_seedhash(50000);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::new();
// calculating an newer value first shouldn't affect the result
let _ = seed_compute.get_seedhash(972764);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_drop_old_data() {
let path = ::std::env::temp_dir(); let path = ::std::env::temp_dir();
let first = Light::new(&path, 0).to_file().unwrap(); let builder = NodeCacheBuilder::new(None);
let first = builder.light(&path, 0).to_file().unwrap().to_owned();
let second = Light::new(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap(); let second = builder.light(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap().to_owned();
assert!(fs::metadata(&first).is_ok()); assert!(fs::metadata(&first).is_ok());
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 2).to_file(); let _ = builder.light(&path, ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err()); assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok()); assert!(fs::metadata(&second).is_ok());
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 3).to_file(); let _ = builder.light(&path, ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err()); assert!(fs::metadata(&second).is_err());
}
} }

ethash/src/keccak.rs Normal file

@ -0,0 +1,52 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate hash;
pub type H256 = [u8; 32];
pub mod keccak_512 {
use super::hash;
pub use self::hash::keccak_512 as unchecked;
pub fn write(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `sha3_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
}
pub mod keccak_256 {
use super::hash;
pub use self::hash::keccak_256 as unchecked;
#[allow(dead_code)]
pub fn write(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_256(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `sha3_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_256(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
}
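
These wrappers exist so the rest of the crate can hash slices without repeating the unsafe FFI call. A tiny illustrative sketch, assuming it sits inside this crate (the function name `hash_seed_into_node` is made up here):

fn hash_seed_into_node(seed: &[u8; 32]) -> [u8; 64] {
    let mut node = [0u8; 64];

    // Hash the 32-byte seed into the 64-byte node buffer.
    keccak_512::write(&seed[..], &mut node);

    // Re-hash the buffer in place; safe because the underlying keccak keeps an
    // internal scratch buffer, as the module comments above point out.
    keccak_512::inplace(&mut node);
    node
}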

ethash/src/lib.rs

@ -14,28 +14,35 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethash implementation
//! See https://github.com/ethereum/wiki/wiki/Ethash
#![cfg_attr(feature = "benches", feature(test))] #![cfg_attr(feature = "benches", feature(test))]
extern crate primal; extern crate primal;
extern crate hash;
extern crate parking_lot; extern crate parking_lot;
extern crate either;
extern crate memmap;
#[macro_use] #[macro_use]
extern crate crunchy; extern crate crunchy;
#[macro_use] #[macro_use]
extern crate log; extern crate log;
mod compute;
mod compute;
mod seed_compute;
mod cache;
mod keccak;
mod shared;
pub use cache::{NodeCacheBuilder, OptimizeFor};
pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number};
use compute::Light;
use keccak::H256;
use parking_lot::Mutex;
pub use seed_compute::SeedHashCompute;
pub use shared::ETHASH_EPOCH_LENGTH;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use compute::Light;
pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash};
use std::sync::Arc; use std::sync::Arc;
use parking_lot::Mutex;
struct LightCache { struct LightCache {
recent_epoch: Option<u64>, recent_epoch: Option<u64>,
@ -46,15 +53,17 @@ struct LightCache {
/// Light/Full cache manager. /// Light/Full cache manager.
pub struct EthashManager { pub struct EthashManager {
nodecache_builder: NodeCacheBuilder,
cache: Mutex<LightCache>, cache: Mutex<LightCache>,
cache_dir: PathBuf, cache_dir: PathBuf,
} }
impl EthashManager { impl EthashManager {
/// Create a new new instance of ethash manager /// Create a new new instance of ethash manager
pub fn new<T: AsRef<Path>>(cache_dir: T) -> EthashManager { pub fn new<T: Into<Option<OptimizeFor>>>(cache_dir: &Path, optimize_for: T) -> EthashManager {
EthashManager { EthashManager {
cache_dir: cache_dir.as_ref().to_path_buf(), cache_dir: cache_dir.to_path_buf(),
nodecache_builder: NodeCacheBuilder::new(optimize_for.into().unwrap_or_default()),
cache: Mutex::new(LightCache { cache: Mutex::new(LightCache {
recent_epoch: None, recent_epoch: None,
recent: None, recent: None,
@ -96,11 +105,19 @@ impl EthashManager {
}; };
match light { match light {
None => { None => {
let light = match Light::from_file(&self.cache_dir, block_number) { let light = match Light::from_file_with_builder(
&self.nodecache_builder,
&self.cache_dir,
block_number,
) {
Ok(light) => Arc::new(light), Ok(light) => Arc::new(light),
Err(e) => { Err(e) => {
debug!("Light cache file not found for {}:{}", block_number, e); debug!("Light cache file not found for {}:{}", block_number, e);
let light = Light::new(&self.cache_dir, block_number); let mut light = Light::new_with_builder(
&self.nodecache_builder,
&self.cache_dir,
block_number,
);
if let Err(e) = light.to_file() { if let Err(e) = light.to_file() {
warn!("Light cache file write error: {}", e); warn!("Light cache file write error: {}", e);
} }
@ -120,7 +137,7 @@ impl EthashManager {
#[test] #[test]
fn test_lru() { fn test_lru() {
let ethash = EthashManager::new(&::std::env::temp_dir()); let ethash = EthashManager::new(&::std::env::temp_dir(), None);
let hash = [0u8; 32]; let hash = [0u8; 32];
ethash.compute_light(1, &hash, 1); ethash.compute_light(1, &hash, 1);
ethash.compute_light(50000, &hash, 1); ethash.compute_light(50000, &hash, 1);
@ -138,24 +155,89 @@ fn test_lru() {
mod benchmarks { mod benchmarks {
extern crate test; extern crate test;
use compute::{Light, light_compute, SeedHashCompute};
use self::test::Bencher; use self::test::Bencher;
use cache::{NodeCacheBuilder, OptimizeFor};
use compute::{Light, light_compute};
const HASH: [u8; 32] = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe,
0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f,
0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
const NONCE: u64 = 0xd7b3ac70a301a249;
#[bench] #[bench]
fn bench_light_compute(b: &mut Bencher) { fn bench_light_compute_memmap(b: &mut Bencher) {
use ::std::env; use std::env;
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72]; let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let nonce = 0xd7b3ac70a301a249; let light = Light::new_with_builder(&builder, &env::temp_dir(), 486382);
let light = Light::new(env::temp_dir(), 486382);
b.iter(|| light_compute(&light, &hash, nonce)); b.iter(|| light_compute(&light, &HASH, NONCE));
} }
#[bench] #[bench]
fn bench_seedhash(b: &mut Bencher) { fn bench_light_compute_memory(b: &mut Bencher) {
let seed_compute = SeedHashCompute::new(); use std::env;
b.iter(|| seed_compute.get_seedhash(486382)); let light = Light::new(&env::temp_dir(), 486382);
b.iter(|| light_compute(&light, &HASH, NONCE));
}
#[bench]
#[ignore]
fn bench_light_new_round_trip_memmap(b: &mut Bencher) {
use std::env;
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let light = Light::new_with_builder(&builder, &env::temp_dir(), 486382);
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
#[ignore]
fn bench_light_new_round_trip_memory(b: &mut Bencher) {
use std::env;
b.iter(|| {
let light = Light::new(&env::temp_dir(), 486382);
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
fn bench_light_from_file_round_trip_memory(b: &mut Bencher) {
use std::env;
let dir = env::temp_dir();
let height = 486382;
{
let mut dummy = Light::new(&dir, height);
dummy.to_file().unwrap();
}
b.iter(|| {
let light = Light::from_file(&dir, 486382).unwrap();
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
fn bench_light_from_file_round_trip_memmap(b: &mut Bencher) {
use std::env;
let dir = env::temp_dir();
let height = 486382;
{
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let mut dummy = Light::new_with_builder(&builder, &dir, height);
dummy.to_file().unwrap();
}
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let light = Light::from_file_with_builder(&builder, &dir, 486382).unwrap();
light_compute(&light, &HASH, NONCE);
});
} }
} }
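
The net effect of the lib.rs changes above is that callers now choose the cache backend when constructing the manager. A hedged usage sketch based on the new `EthashManager::new` signature and the `test_lru` call above (the function and variable names are illustrative):

use std::path::Path;

fn build_managers(cache_dir: &Path) -> (EthashManager, EthashManager) {
    // `None` keeps the previous behaviour: OptimizeFor::Cpu, heap-allocated cache.
    let default_manager = EthashManager::new(cache_dir, None);

    // Explicitly opt into the new memmap-backed cache from cache.rs.
    let memmapped_manager = EthashManager::new(cache_dir, OptimizeFor::Memory);

    (default_manager, memmapped_manager)
}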

ethash/src/seed_compute.rs Normal file

@ -0,0 +1,109 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use shared;
use keccak::{keccak_256, H256};
use std::cell::Cell;
pub struct SeedHashCompute {
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
}
impl SeedHashCompute {
#[inline]
pub fn new() -> SeedHashCompute {
SeedHashCompute {
prev_epoch: Cell::new(0),
prev_seedhash: Cell::new([0u8; 32]),
}
}
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
pub fn hash_block_number(&self, block_number: u64) -> H256 {
self.hash_epoch(shared::epoch(block_number))
}
#[inline]
pub fn hash_epoch(&self, epoch: u64) -> H256 {
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(
self.prev_seedhash.get(),
self.prev_epoch.get(),
epoch,
);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
keccak_256::inplace(&mut hash);
}
hash
}
}
#[cfg(test)]
mod tests {
use super::SeedHashCompute;
#[test]
fn test_seed_compute_once() {
let seed_compute = SeedHashCompute::new();
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::new();
assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::new();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.hash_block_number(50000);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::new();
// calculating an newer value first shouldn't affect the result
let _ = seed_compute.hash_block_number(972764);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
}
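
In other words, the seed hash for an epoch is keccak-256 folded over the zero hash, once per epoch, with the `Cell`s above memoising the most recent result. A small sketch of that equivalence (the helper name and the inlined epoch-length constant are assumptions; compute.rs exposes the same shortcut as `slow_hash_block_number`):

fn seed_for_block(block_number: u64) -> [u8; 32] {
    const ETHASH_EPOCH_LENGTH: u64 = 30000;

    // Cached path: e.g. block 486382 lies in epoch 486382 / 30000 = 16,
    // so this performs at most 16 rounds of keccak-256.
    let compute = SeedHashCompute::new();
    let cached = compute.hash_block_number(block_number);

    // Uncached path: start from the zero hash at epoch 0 and iterate forward.
    let direct = SeedHashCompute::resume_compute_seedhash(
        [0u8; 32],
        0,
        block_number / ETHASH_EPOCH_LENGTH,
    );
    assert_eq!(cached, direct);
    cached
}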

ethash/src/shared.rs Normal file

@ -0,0 +1,149 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use primal::is_prime;
pub const DATASET_BYTES_INIT: u64 = 1 << 30;
pub const DATASET_BYTES_GROWTH: u64 = 1 << 23;
pub const CACHE_BYTES_INIT: u64 = 1 << 24;
pub const CACHE_BYTES_GROWTH: u64 = 1 << 17;
pub const ETHASH_EPOCH_LENGTH: u64 = 30000;
pub const ETHASH_CACHE_ROUNDS: usize = 3;
pub const ETHASH_MIX_BYTES: usize = 128;
pub const ETHASH_ACCESSES: usize = 64;
pub const ETHASH_DATASET_PARENTS: u32 = 256;
pub const NODE_DWORDS: usize = NODE_WORDS / 2;
pub const NODE_WORDS: usize = NODE_BYTES / 4;
pub const NODE_BYTES: usize = 64;
pub fn epoch(block_number: u64) -> u64 {
block_number / ETHASH_EPOCH_LENGTH
}
static CHARS: &'static [u8] = b"0123456789abcdef";
pub fn to_hex(bytes: &[u8]) -> String {
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
}
unsafe { String::from_utf8_unchecked(v) }
}
pub fn get_cache_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
}
pub fn get_data_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
}
pub type NodeBytes = [u8; NODE_BYTES];
pub type NodeWords = [u32; NODE_WORDS];
pub type NodeDwords = [u64; NODE_DWORDS];
macro_rules! static_assert_size_eq {
(@inner $a:ty, $b:ty, $($rest:ty),*) => {
fn first() {
static_assert_size_eq!($a, $b);
}
fn second() {
static_assert_size_eq!($b, $($rest),*);
}
};
(@inner $a:ty, $b:ty) => {
unsafe {
let val: $b = ::std::mem::uninitialized();
let _: $a = ::std::mem::transmute(val);
}
};
($($rest:ty),*) => {
static_assert_size_eq!(size_eq: $($rest),*);
};
($name:ident : $($rest:ty),*) => {
#[allow(dead_code)]
fn $name() {
static_assert_size_eq!(@inner $($rest),*);
}
};
}
static_assert_size_eq!(Node, NodeBytes, NodeWords, NodeDwords);
#[repr(C)]
pub union Node {
pub dwords: NodeDwords,
pub words: NodeWords,
pub bytes: NodeBytes,
}
impl Clone for Node {
fn clone(&self) -> Self {
unsafe { Node { bytes: *&self.bytes } }
}
}
// We use `inline(always)` because I was experiencing an 100% slowdown and `perf` showed that these
// calls were taking up ~30% of the runtime. Adding these annotations fixes the issue. Remove at
// your peril, if and only if you have benchmarks to prove that this doesn't reintroduce the
// performance regression. It's not caused by the `debug_assert_eq!` either, your guess is as good
// as mine.
impl Node {
#[inline(always)]
pub fn as_bytes(&self) -> &NodeBytes {
unsafe { &self.bytes }
}
#[inline(always)]
pub fn as_bytes_mut(&mut self) -> &mut NodeBytes {
unsafe { &mut self.bytes }
}
#[inline(always)]
pub fn as_words(&self) -> &NodeWords {
unsafe { &self.words }
}
#[inline(always)]
pub fn as_words_mut(&mut self) -> &mut NodeWords {
unsafe { &mut self.words }
}
#[inline(always)]
pub fn as_dwords(&self) -> &NodeDwords {
unsafe { &self.dwords }
}
#[inline(always)]
pub fn as_dwords_mut(&mut self) -> &mut NodeDwords {
unsafe { &mut self.dwords }
}
}
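
To make the cache-size search above concrete, here is the epoch-0 case worked out; it reproduces the `assert_eq!(16776896usize, get_cache_size(0))` value tested in compute.rs (the standalone function is a sketch, not part of the commit):

use primal::is_prime;

fn cache_size_epoch_zero() -> u64 {
    // CACHE_BYTES_INIT - NODE_BYTES = 2^24 - 64 = 16_777_152
    let mut sz: u64 = (1 << 24) - 64;

    // 16_777_152 / 64 = 262_143 = 3 * 87_381   -> not prime
    // 16_777_024 / 64 = 262_141 = 11 * 23_831  -> not prime
    // 16_776_896 / 64 = 262_139                -> prime, so the loop stops here
    while !is_prime(sz / 64) {
        sz -= 2 * 64;
    }

    assert_eq!(sz, 16_776_896);
    sz
}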

ethcore/Cargo.toml

@@ -51,6 +51,7 @@ lru-cache = "0.1.0"
 native-contracts = { path = "native_contracts" }
 num = "0.1"
 num_cpus = "1.2"
+parity-machine = { path = "../machine" }
 parking_lot = "0.4"
 price-info = { path = "../price-info" }
 rayon = "0.8"


@@ -19,7 +19,8 @@
 use std::sync::Arc;
 use ethcore::encoded;
-use ethcore::engines::{Engine, StateDependentProof};
+use ethcore::engines::{EthEngine, StateDependentProof};
+use ethcore::machine::EthereumMachine;
 use ethcore::header::Header;
 use ethcore::receipt::Receipt;
 use futures::future::IntoFuture;
@@ -44,7 +45,12 @@ pub trait ChainDataFetcher: Send + Sync + 'static {
 fn block_receipts(&self, header: &Header) -> Self::Receipts;
 /// Fetch epoch transition proof at given header.
-fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>) -> Self::Transition;
+fn epoch_transition(
+&self,
+_hash: H256,
+_engine: Arc<EthEngine>,
+_checker: Arc<StateDependentProof<EthereumMachine>>
+) -> Self::Transition;
 }
 /// Fetcher implementation which cannot fetch anything.
@@ -68,7 +74,12 @@ impl ChainDataFetcher for Unavailable {
 Err("fetching block receipts unavailable")
 }
-fn epoch_transition(&self, _h: H256, _e: Arc<Engine>, _check: Arc<StateDependentProof>) -> Self::Transition {
+fn epoch_transition(
+&self,
+_hash: H256,
+_engine: Arc<EthEngine>,
+_checker: Arc<StateDependentProof<EthereumMachine>>
+) -> Self::Transition {
 Err("fetching epoch transition proofs unavailable")
 }
 }


@ -20,7 +20,8 @@ use std::sync::{Weak, Arc};
use ethcore::block_status::BlockStatus; use ethcore::block_status::BlockStatus;
use ethcore::client::{ClientReport, EnvInfo}; use ethcore::client::{ClientReport, EnvInfo};
use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof, Unsure}; use ethcore::engines::{epoch, EthEngine, EpochChange, EpochTransition, Proof};
use ethcore::machine::EthereumMachine;
use ethcore::error::BlockImportError; use ethcore::error::BlockImportError;
use ethcore::ids::BlockId; use ethcore::ids::BlockId;
use ethcore::header::{BlockNumber, Header}; use ethcore::header::{BlockNumber, Header};
@ -117,7 +118,7 @@ pub trait LightChainClient: Send + Sync {
fn env_info(&self, id: BlockId) -> Option<EnvInfo>; fn env_info(&self, id: BlockId) -> Option<EnvInfo>;
/// Get a handle to the consensus engine. /// Get a handle to the consensus engine.
fn engine(&self) -> &Arc<Engine>; fn engine(&self) -> &Arc<EthEngine>;
/// Query whether a block is known. /// Query whether a block is known.
fn is_known(&self, hash: &H256) -> bool; fn is_known(&self, hash: &H256) -> bool;
@ -165,7 +166,7 @@ impl<T: LightChainClient> AsLightClient for T {
/// Light client implementation. /// Light client implementation.
pub struct Client<T> { pub struct Client<T> {
queue: HeaderQueue, queue: HeaderQueue,
engine: Arc<Engine>, engine: Arc<EthEngine>,
chain: HeaderChain, chain: HeaderChain,
report: RwLock<ClientReport>, report: RwLock<ClientReport>,
import_lock: Mutex<()>, import_lock: Mutex<()>,
@ -381,7 +382,7 @@ impl<T: ChainDataFetcher> Client<T> {
} }
/// Get a handle to the verification engine. /// Get a handle to the verification engine.
pub fn engine(&self) -> &Arc<Engine> { pub fn engine(&self) -> &Arc<EthEngine> {
&self.engine &self.engine
} }
@ -444,7 +445,7 @@ impl<T: ChainDataFetcher> Client<T> {
}; };
// Verify Block Family // Verify Block Family
let verify_family_result = self.engine.verify_block_family(&verified_header, &parent_header.decode(), None); let verify_family_result = self.engine.verify_block_family(&verified_header, &parent_header.decode());
if let Err(e) = verify_family_result { if let Err(e) = verify_family_result {
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}",
verified_header.number(), verified_header.hash(), e); verified_header.number(), verified_header.hash(), e);
@ -453,7 +454,7 @@ impl<T: ChainDataFetcher> Client<T> {
}; };
// "external" verification. // "external" verification.
let verify_external_result = self.engine.verify_block_external(&verified_header, None); let verify_external_result = self.engine.verify_block_external(&verified_header);
if let Err(e) = verify_external_result { if let Err(e) = verify_external_result {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}",
verified_header.number(), verified_header.hash(), e); verified_header.number(), verified_header.hash(), e);
@ -465,20 +466,35 @@ impl<T: ChainDataFetcher> Client<T> {
		true
	}
-	fn check_epoch_signal(&self, verified_header: &Header) -> Result<Option<Proof>, T::Error> {
-		let (mut block, mut receipts) = (None, None);
-		// First, check without providing auxiliary data.
-		match self.engine.signals_epoch_end(verified_header, None, None) {
+	fn check_epoch_signal(&self, verified_header: &Header) -> Result<Option<Proof<EthereumMachine>>, T::Error> {
+		use ethcore::machine::{AuxiliaryRequest, AuxiliaryData};
+		let mut block: Option<Vec<u8>> = None;
+		let mut receipts: Option<Vec<_>> = None;
+		loop {
+			let is_signal = {
+				let auxiliary = AuxiliaryData {
+					bytes: block.as_ref().map(|x| &x[..]),
+					receipts: receipts.as_ref().map(|x| &x[..]),
+				};
+				self.engine.signals_epoch_end(verified_header, auxiliary)
+			};
+			// check with any auxiliary data fetched so far
+			match is_signal {
				EpochChange::No => return Ok(None),
				EpochChange::Yes(proof) => return Ok(Some(proof)),
				EpochChange::Unsure(unsure) => {
					let (b, r) = match unsure {
-						Unsure::NeedsBody =>
+						AuxiliaryRequest::Body =>
							(Some(self.fetcher.block_body(verified_header)), None),
-						Unsure::NeedsReceipts =>
+						AuxiliaryRequest::Receipts =>
							(None, Some(self.fetcher.block_receipts(verified_header))),
-						Unsure::NeedsBoth => (
+						AuxiliaryRequest::Both => (
							Some(self.fetcher.block_body(verified_header)),
							Some(self.fetcher.block_receipts(verified_header)),
						),
@ -493,22 +509,11 @@ impl<T: ChainDataFetcher> Client<T> {
					}
				}
			}
-		let block = block.as_ref().map(|x| &x[..]);
-		let receipts = receipts.as_ref().map(|x| &x[..]);
-		// Check again now that required data has been fetched.
-		match self.engine.signals_epoch_end(verified_header, block, receipts) {
-			EpochChange::No => return Ok(None),
-			EpochChange::Yes(proof) => return Ok(Some(proof)),
-			EpochChange::Unsure(_) =>
-				panic!("Detected faulty engine implementation: requests additional \
-					data to check epoch end signal when everything necessary provided"),
-		}
-	}
		}
	}
// attempts to fetch the epoch proof from the network until successful. // attempts to fetch the epoch proof from the network until successful.
fn write_pending_proof(&self, header: &Header, proof: Proof) -> Result<(), T::Error> { fn write_pending_proof(&self, header: &Header, proof: Proof<EthereumMachine>) -> Result<(), T::Error> {
let proof = match proof { let proof = match proof {
Proof::Known(known) => known, Proof::Known(known) => known,
Proof::WithState(state_dependent) => { Proof::WithState(state_dependent) => {
@ -568,7 +573,7 @@ impl<T: ChainDataFetcher> LightChainClient for Client<T> {
Client::env_info(self, id) Client::env_info(self, id)
} }
fn engine(&self) -> &Arc<Engine> { fn engine(&self) -> &Arc<EthEngine> {
Client::engine(self) Client::engine(self)
} }


@ -899,7 +899,7 @@ impl LightProtocol {
// the maximum amount of requests we'll fill in a single packet. // the maximum amount of requests we'll fill in a single packet.
const MAX_REQUESTS: usize = 256; const MAX_REQUESTS: usize = 256;
use ::request::RequestBuilder; use ::request::Builder;
use ::request::CompleteRequest; use ::request::CompleteRequest;
let peers = self.peers.read(); let peers = self.peers.read();
@ -914,7 +914,7 @@ impl LightProtocol {
let peer: &mut Peer = &mut *peer; let peer: &mut Peer = &mut *peer;
let req_id: u64 = raw.val_at(0)?; let req_id: u64 = raw.val_at(0)?;
let mut request_builder = RequestBuilder::default(); let mut request_builder = Builder::default();
trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id);


@ -147,7 +147,7 @@ fn compute_timeout(reqs: &Requests) -> Duration {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use net::ReqId; use net::ReqId;
use request::RequestBuilder; use request::Builder;
use time::{SteadyTime, Duration}; use time::{SteadyTime, Duration};
use super::{RequestSet, compute_timeout}; use super::{RequestSet, compute_timeout};
@ -156,7 +156,7 @@ mod tests {
let test_begin = SteadyTime::now(); let test_begin = SteadyTime::now();
let mut req_set = RequestSet::default(); let mut req_set = RequestSet::default();
let the_req = RequestBuilder::default().build(); let the_req = Builder::default().build();
let req_time = compute_timeout(&the_req); let req_time = compute_timeout(&the_req);
req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1)); req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1));
@ -173,7 +173,7 @@ mod tests {
#[test] #[test]
fn cumulative_cost() { fn cumulative_cost() {
let the_req = RequestBuilder::default().build(); let the_req = Builder::default().build();
let test_begin = SteadyTime::now(); let test_begin = SteadyTime::now();
let test_end = test_begin + Duration::seconds(1); let test_end = test_begin + Duration::seconds(1);
let mut req_set = RequestSet::default(); let mut req_set = RequestSet::default();


@ -41,7 +41,7 @@ use std::sync::Arc;
// helper for encoding a single request into a packet. // helper for encoding a single request into a packet.
// panics on bad backreference. // panics on bad backreference.
fn encode_single(request: Request) -> NetworkRequests { fn encode_single(request: Request) -> NetworkRequests {
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
builder.push(request).unwrap(); builder.push(request).unwrap();
builder.build() builder.build()
} }
@ -344,7 +344,7 @@ fn get_block_bodies() {
proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status); proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &my_status);
} }
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
let mut bodies = Vec::new(); let mut bodies = Vec::new();
for i in 0..10 { for i in 0..10 {
@ -400,7 +400,7 @@ fn get_block_receipts() {
.take(10) .take(10)
.collect(); .collect();
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
let mut receipts = Vec::new(); let mut receipts = Vec::new();
for hash in block_hashes.iter().cloned() { for hash in block_hashes.iter().cloned() {
builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap(); builder.push(Request::Receipts(IncompleteReceiptsRequest { hash: hash.into() })).unwrap();
@ -448,7 +448,7 @@ fn get_state_proofs() {
let key1: H256 = U256::from(11223344).into(); let key1: H256 = U256::from(11223344).into();
let key2: H256 = U256::from(99988887).into(); let key2: H256 = U256::from(99988887).into();
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
builder.push(Request::Account(IncompleteAccountRequest { builder.push(Request::Account(IncompleteAccountRequest {
block_hash: H256::default().into(), block_hash: H256::default().into(),
address_hash: key1.into(), address_hash: key1.into(),


@ -74,8 +74,8 @@ impl Peer {
// Attempted request info and sender to put received value. // Attempted request info and sender to put received value.
struct Pending { struct Pending {
requests: basic_request::Requests<CheckedRequest>, requests: basic_request::Batch<CheckedRequest>,
net_requests: basic_request::Requests<NetworkRequest>, net_requests: basic_request::Batch<NetworkRequest>,
required_capabilities: Capabilities, required_capabilities: Capabilities,
responses: Vec<Response>, responses: Vec<Response>,
sender: oneshot::Sender<Vec<Response>>, sender: oneshot::Sender<Vec<Response>>,
@ -151,7 +151,7 @@ impl Pending {
fn update_net_requests(&mut self) { fn update_net_requests(&mut self) {
use request::IncompleteRequest; use request::IncompleteRequest;
let mut builder = basic_request::RequestBuilder::default(); let mut builder = basic_request::Builder::default();
let num_answered = self.requests.num_answered(); let num_answered = self.requests.num_answered();
let mut mapping = move |idx| idx - num_answered; let mut mapping = move |idx| idx - num_answered;
@ -281,7 +281,7 @@ impl OnDemand {
return Ok(receiver); return Ok(receiver);
} }
let mut builder = basic_request::RequestBuilder::default(); let mut builder = basic_request::Builder::default();
let responses = Vec::with_capacity(requests.len()); let responses = Vec::with_capacity(requests.len());
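`Pending::update_net_requests` above rebuilds the outgoing batch from the still-unanswered requests, shifting every back-reference down by `num_answered`. A toy sketch of that re-indexing, assuming back-references are plain request indices and that references into already-answered requests have been resolved to concrete values beforehand:

```rust
// Drop the first `num_answered` requests and shift remaining back-references
// so each still points at the same logical request.
#[derive(Clone, Debug, PartialEq)]
enum Field {
    Scalar(u64),
    BackReference(usize),
}

fn rebuild(requests: &[Field], num_answered: usize) -> Vec<Field> {
    let mapping = |idx: usize| idx - num_answered;
    requests[num_answered..]
        .iter()
        .map(|req| match req {
            Field::Scalar(x) => Field::Scalar(*x),
            Field::BackReference(idx) => Field::BackReference(mapping(*idx)),
        })
        .collect()
}

fn main() {
    let reqs = vec![
        Field::Scalar(100),
        Field::Scalar(200),
        Field::BackReference(1),
    ];
    // Once the first request is answered, the remaining back-reference shifts from 1 to 0.
    assert_eq!(
        rebuild(&reqs, 1),
        vec![Field::Scalar(200), Field::BackReference(0)]
    );
}
```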


@ -20,8 +20,9 @@ use std::sync::Arc;
use ethcore::basic_account::BasicAccount; use ethcore::basic_account::BasicAccount;
use ethcore::encoded; use ethcore::encoded;
use ethcore::engines::{Engine, StateDependentProof}; use ethcore::engines::{EthEngine, StateDependentProof};
use ethcore::receipt::{Receipt, TransactionOutcome}; use ethcore::machine::EthereumMachine;
use ethcore::receipt::Receipt;
use ethcore::state::{self, ProvedExecution}; use ethcore::state::{self, ProvedExecution};
use ethcore::transaction::SignedTransaction; use ethcore::transaction::SignedTransaction;
use vm::EnvInfo; use vm::EnvInfo;
@ -843,7 +844,7 @@ pub struct TransactionProof {
// TODO: it's not really possible to provide this if the header is unknown. // TODO: it's not really possible to provide this if the header is unknown.
pub env_info: EnvInfo, pub env_info: EnvInfo,
/// Consensus engine. /// Consensus engine.
pub engine: Arc<Engine>, pub engine: Arc<EthEngine>,
} }
impl TransactionProof { impl TransactionProof {
@ -858,7 +859,7 @@ impl TransactionProof {
state_items, state_items,
root, root,
&self.tx, &self.tx,
&*self.engine, self.engine.machine(),
&self.env_info, &self.env_info,
); );
@ -877,15 +878,15 @@ pub struct Signal {
/// Block hash and number to fetch proof for. /// Block hash and number to fetch proof for.
pub hash: H256, pub hash: H256,
/// Consensus engine, used to check the proof. /// Consensus engine, used to check the proof.
pub engine: Arc<Engine>, pub engine: Arc<EthEngine>,
/// Special checker for the proof. /// Special checker for the proof.
pub proof_check: Arc<StateDependentProof>, pub proof_check: Arc<StateDependentProof<EthereumMachine>>,
} }
impl Signal { impl Signal {
/// Check the signal, returning the signal or indicate that it's bad. /// Check the signal, returning the signal or indicate that it's bad.
pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result<Vec<u8>, Error> { pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result<Vec<u8>, Error> {
self.proof_check.check_proof(&*self.engine, signal) self.proof_check.check_proof(self.engine.machine(), signal)
.map(|_| signal.to_owned()) .map(|_| signal.to_owned())
.map_err(|_| Error::BadProof) .map_err(|_| Error::BadProof)
} }
@ -904,7 +905,7 @@ mod tests {
use ethcore::client::{BlockChainClient, TestBlockChainClient, EachBlockWith}; use ethcore::client::{BlockChainClient, TestBlockChainClient, EachBlockWith};
use ethcore::header::Header; use ethcore::header::Header;
use ethcore::encoded; use ethcore::encoded;
use ethcore::receipt::Receipt; use ethcore::receipt::{Receipt, TransactionOutcome};
fn make_cache() -> ::cache::Cache { fn make_cache() -> ::cache::Cache {
::cache::Cache::new(Default::default(), ::time::Duration::seconds(1)) ::cache::Cache::new(Default::default(), ::time::Duration::seconds(1))
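Several call sites in this file now go through `self.engine.machine()` instead of handing the whole engine around. A toy illustration of that split follows; the types below are made up for the sketch (not ethcore's real `Engine`/`EthEngine`/`EthereumMachine`), but the shape matches the calls above: consensus hooks stay on the engine, consensus-independent checks live on the machine it exposes.

```rust
// Chain rules that do not depend on consensus live on the machine.
struct EthereumMachine {
    chain_id: u64,
}

impl EthereumMachine {
    fn verify_transaction_basic(&self, payload: &[u8]) -> Result<(), String> {
        if payload.is_empty() {
            Err("empty transaction".into())
        } else {
            Ok(())
        }
    }
}

trait Engine {
    /// Most callers only need the underlying machine.
    fn machine(&self) -> &EthereumMachine;

    /// Consensus-specific hook, kept on the engine itself.
    fn seal_fields(&self) -> usize { 0 }
}

struct NullEngine {
    machine: EthereumMachine,
}

impl Engine for NullEngine {
    fn machine(&self) -> &EthereumMachine { &self.machine }
}

fn main() {
    let engine = NullEngine { machine: EthereumMachine { chain_id: 1 } };
    // Call sites now read `engine.machine()` rather than `&*engine`.
    assert!(engine.machine().verify_transaction_basic(b"0xf8...").is_ok());
    assert_eq!(engine.seal_fields(), 0);
    let _ = engine.machine().chain_id;
}
```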


@ -25,23 +25,23 @@ use request::{
}; };
/// Build chained requests. Push them onto the series with `push`, /// Build chained requests. Push them onto the series with `push`,
/// and produce a `Requests` object with `build`. Outputs are checked for consistency. /// and produce a `Batch` object with `build`. Outputs are checked for consistency.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct RequestBuilder<T> { pub struct Builder<T> {
output_kinds: HashMap<(usize, usize), OutputKind>, output_kinds: HashMap<(usize, usize), OutputKind>,
requests: Vec<T>, requests: Vec<T>,
} }
impl<T> Default for RequestBuilder<T> { impl<T> Default for Builder<T> {
fn default() -> Self { fn default() -> Self {
RequestBuilder { Builder {
output_kinds: HashMap::new(), output_kinds: HashMap::new(),
requests: Vec::new(), requests: Vec::new(),
} }
} }
} }
impl<T: IncompleteRequest> RequestBuilder<T> { impl<T: IncompleteRequest> Builder<T> {
/// Attempt to push a request onto the request chain. Fails if the request /// Attempt to push a request onto the request chain. Fails if the request
/// references a non-existent output of a prior request. /// references a non-existent output of a prior request.
pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> { pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> {
@ -62,9 +62,9 @@ impl<T: IncompleteRequest> RequestBuilder<T> {
&self.output_kinds &self.output_kinds
} }
/// Convert this into a "requests" object. /// Convert this into a "batch" object.
pub fn build(self) -> Requests<T> { pub fn build(self) -> Batch<T> {
Requests { Batch {
outputs: HashMap::new(), outputs: HashMap::new(),
requests: self.requests, requests: self.requests,
answered: 0, answered: 0,
@ -74,13 +74,13 @@ impl<T: IncompleteRequest> RequestBuilder<T> {
/// Requests pending responses. /// Requests pending responses.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct Requests<T> { pub struct Batch<T> {
outputs: HashMap<(usize, usize), Output>, outputs: HashMap<(usize, usize), Output>,
requests: Vec<T>, requests: Vec<T>,
answered: usize, answered: usize,
} }
impl<T> Requests<T> { impl<T> Batch<T> {
/// Get access to the underlying slice of requests. /// Get access to the underlying slice of requests.
// TODO: unimplemented -> Vec<Request>, // do we _have to_ allocate? // TODO: unimplemented -> Vec<Request>, // do we _have to_ allocate?
pub fn requests(&self) -> &[T] { &self.requests } pub fn requests(&self) -> &[T] { &self.requests }
@ -94,10 +94,10 @@ impl<T> Requests<T> {
} }
/// Map requests from one type into another. /// Map requests from one type into another.
pub fn map_requests<F, U>(self, f: F) -> Requests<U> pub fn map_requests<F, U>(self, f: F) -> Batch<U>
where F: FnMut(T) -> U, U: IncompleteRequest where F: FnMut(T) -> U, U: IncompleteRequest
{ {
Requests { Batch {
outputs: self.outputs, outputs: self.outputs,
requests: self.requests.into_iter().map(f).collect(), requests: self.requests.into_iter().map(f).collect(),
answered: self.answered, answered: self.answered,
@ -105,7 +105,7 @@ impl<T> Requests<T> {
} }
} }
impl<T: IncompleteRequest + Clone> Requests<T> { impl<T: IncompleteRequest + Clone> Batch<T> {
/// Get the next request as a filled request. Returns `None` when all requests answered. /// Get the next request as a filled request. Returns `None` when all requests answered.
pub fn next_complete(&self) -> Option<T::Complete> { pub fn next_complete(&self) -> Option<T::Complete> {
if self.is_complete() { if self.is_complete() {
@ -113,7 +113,7 @@ impl<T: IncompleteRequest + Clone> Requests<T> {
} else { } else {
Some(self.requests[self.answered].clone() Some(self.requests[self.answered].clone()
.complete() .complete()
.expect("All outputs checked as invariant of `Requests` object; qed")) .expect("All outputs checked as invariant of `Batch` object; qed"))
} }
} }
@ -149,7 +149,7 @@ impl<T: IncompleteRequest + Clone> Requests<T> {
} }
} }
impl<T: super::CheckedRequest + Clone> Requests<T> { impl<T: super::CheckedRequest + Clone> Batch<T> {
/// Supply a response for the next request. /// Supply a response for the next request.
/// Fails on: wrong request kind, all requests answered already. /// Fails on: wrong request kind, all requests answered already.
pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response) pub fn supply_response(&mut self, env: &T::Environment, response: &T::Response)
@ -170,7 +170,7 @@ impl<T: super::CheckedRequest + Clone> Requests<T> {
} }
} }
impl Requests<super::Request> { impl Batch<super::Request> {
/// For each request, produce a response. /// For each request, produce a response.
/// The responses vector produced goes up to the point where the responder /// The responses vector produced goes up to the point where the responder
/// first returns `None`, an invalid response, or until all requests have been responded to. /// first returns `None`, an invalid response, or until all requests have been responded to.
@ -193,7 +193,7 @@ impl Requests<super::Request> {
} }
} }
impl<T: IncompleteRequest> Deref for Requests<T> { impl<T: IncompleteRequest> Deref for Batch<T> {
type Target = [T]; type Target = [T];
fn deref(&self) -> &[T] { fn deref(&self) -> &[T] {
@ -201,7 +201,7 @@ impl<T: IncompleteRequest> Deref for Requests<T> {
} }
} }
impl<T: IncompleteRequest> DerefMut for Requests<T> { impl<T: IncompleteRequest> DerefMut for Batch<T> {
fn deref_mut(&mut self) -> &mut [T] { fn deref_mut(&mut self) -> &mut [T] {
&mut self.requests[..] &mut self.requests[..]
} }
@ -210,12 +210,12 @@ impl<T: IncompleteRequest> DerefMut for Requests<T> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use request::*; use request::*;
use super::RequestBuilder; use super::Builder;
use bigint::hash::H256; use bigint::hash::H256;
#[test] #[test]
fn all_scalar() { fn all_scalar() {
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
num: 100.into(), num: 100.into(),
})).unwrap(); })).unwrap();
@ -227,7 +227,7 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn missing_backref() { fn missing_backref() {
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
num: Field::BackReference(100, 3), num: Field::BackReference(100, 3),
})).unwrap(); })).unwrap();
@ -236,7 +236,7 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn wrong_kind() { fn wrong_kind() {
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { assert!(builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
num: 100.into(), num: 100.into(),
})).is_ok()); })).is_ok());
@ -247,7 +247,7 @@ mod tests {
#[test] #[test]
fn good_backreference() { fn good_backreference() {
let mut builder = RequestBuilder::default(); let mut builder = Builder::default();
builder.push(Request::HeaderProof(IncompleteHeaderProofRequest { builder.push(Request::HeaderProof(IncompleteHeaderProofRequest {
num: 100.into(), // header proof puts hash at output 0. num: 100.into(), // header proof puts hash at output 0.
})).unwrap(); })).unwrap();
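For orientation, here is a self-contained toy version of the renamed builder/batch flow. It is not the real `request` module: the real `Builder` tracks output kinds per (request, output) pair and `Batch` stores the produced outputs, while this sketch only enforces the core invariant that a back-reference must point at a request pushed earlier.

```rust
// Toy stand-ins: a "request" may back-reference an earlier request in the batch.
#[derive(Clone, Debug)]
enum Field {
    Scalar(u64),
    BackReference { request: usize },
}

#[derive(Clone, Debug)]
struct Request {
    num: Field,
}

#[derive(Debug)]
struct NoSuchOutput;

#[derive(Default)]
struct Builder {
    requests: Vec<Request>,
}

impl Builder {
    // Reject a back-reference that points past the requests pushed so far,
    // mirroring the consistency check the real `Builder::push` performs.
    fn push(&mut self, request: Request) -> Result<(), NoSuchOutput> {
        if let Field::BackReference { request: idx } = request.num {
            if idx >= self.requests.len() {
                return Err(NoSuchOutput);
            }
        }
        self.requests.push(request);
        Ok(())
    }

    fn build(self) -> Batch {
        Batch { requests: self.requests, answered: 0 }
    }
}

struct Batch {
    requests: Vec<Request>,
    answered: usize,
}

impl Batch {
    fn next_unanswered(&self) -> Option<&Request> {
        self.requests.get(self.answered)
    }

    fn supply_response(&mut self) {
        self.answered += 1;
    }
}

fn main() {
    let mut builder = Builder::default();
    builder.push(Request { num: Field::Scalar(100) }).unwrap();
    builder.push(Request { num: Field::BackReference { request: 0 } }).unwrap();
    // A reference to a request that does not exist is rejected when pushed.
    assert!(builder.push(Request { num: Field::BackReference { request: 7 } }).is_err());

    let mut batch = builder.build();
    while batch.next_unanswered().is_some() {
        batch.supply_response();
    }
    assert_eq!(batch.answered, 2);
}
```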


@ -19,7 +19,7 @@
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
use bigint::hash::H256; use bigint::hash::H256;
mod builder; mod batch;
// re-exports of request types. // re-exports of request types.
pub use self::header::{ pub use self::header::{
@ -73,7 +73,7 @@ pub use self::epoch_signal::{
Response as SignalResponse, Response as SignalResponse,
}; };
pub use self::builder::{RequestBuilder, Requests}; pub use self::batch::{Batch, Builder};
/// Error indicating a reference to a non-existent or wrongly-typed output. /// Error indicating a reference to a non-existent or wrongly-typed output.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
@ -241,7 +241,7 @@ impl Encodable for HashOrNumber {
} }
/// Type alias for "network requests". /// Type alias for "network requests".
pub type NetworkRequests = Requests<Request>; pub type NetworkRequests = Batch<Request>;
/// All request types, as they're sent over the network. /// All request types, as they're sent over the network.
/// They may be incomplete, with back-references to outputs /// They may be incomplete, with back-references to outputs


@ -133,7 +133,7 @@ mod test {
fn node_filter() { fn node_filter() {
let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap(); let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
let data = include_bytes!("../res/node_filter.json"); let data = include_bytes!("../res/node_filter.json");
let spec = Spec::load(::std::env::temp_dir(), &data[..]).unwrap(); let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
let client_db = Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))); let client_db = Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
let client = Client::new( let client = Client::new(


@ -1,7 +1,9 @@
{ {
"name": "GenesisConstructor", "name": "GenesisConstructor",
"engine": { "engine": {
"null": null "null": {
"params": {}
}
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",


@ -6,12 +6,12 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x0", "homesteadTransition": "0x0",
"eip150Transition": "0x0", "eip150Transition": "0x0",
"eip160Transition": "0x0", "eip160Transition": "0x0",
"eip161abcTransition": "0x0", "eip161abcTransition": "0x0",
"eip161dTransition": "0x0", "eip161dTransition": "0x0",
"maxCodeSize": 24576,
"eip649Reward": "0x29A2241AF62C0000", "eip649Reward": "0x29A2241AF62C0000",
"eip100bTransition": "0x0", "eip100bTransition": "0x0",
"eip649Transition": "0x0" "eip649Transition": "0x0"
@ -20,12 +20,12 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"maxCodeSize": 24576,
"eip98Transition": "0xffffffffffffffff", "eip98Transition": "0xffffffffffffffff",
"eip140Transition": "0x0", "eip140Transition": "0x0",
"eip211Transition": "0x0", "eip211Transition": "0x0",


@ -7,6 +7,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": 1150000, "homesteadTransition": 1150000,
"eip150Transition": 2500000, "eip150Transition": 2500000,
"eip160Transition": 3000000, "eip160Transition": 3000000,
@ -21,7 +22,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -6,12 +6,12 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x0", "homesteadTransition": "0x0",
"eip150Transition": "0x0", "eip150Transition": "0x0",
"eip160Transition": "0x0", "eip160Transition": "0x0",
"eip161abcTransition": "0x0", "eip161abcTransition": "0x0",
"eip161dTransition": "0x0", "eip161dTransition": "0x0",
"maxCodeSize": 24576,
"eip649Reward": "0x29A2241AF62C0000", "eip649Reward": "0x29A2241AF62C0000",
"eip100bTransition": "0x0", "eip100bTransition": "0x0",
"eip649Transition": "0x0" "eip649Transition": "0x0"
@ -20,12 +20,12 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"maxCodeSize": 24576,
"eip98Transition": "0xffffffffffffffff", "eip98Transition": "0xffffffffffffffff",
"eip140Transition": "0x0", "eip140Transition": "0x0",
"eip210Transition": "0x0", "eip210Transition": "0x0",


@ -6,18 +6,17 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x0", "homesteadTransition": "0x0",
"eip150Transition": "0x0", "eip150Transition": "0x0",
"eip160Transition": "0x7fffffffffffffff", "eip160Transition": "0x7fffffffffffffff",
"eip161abcTransition": "0x7fffffffffffffff", "eip161abcTransition": "0x7fffffffffffffff",
"eip161dTransition": "0x7fffffffffffffff", "eip161dTransition": "0x7fffffffffffffff"
"maxCodeSize": 24576
} }
} }
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
@ -25,7 +24,8 @@
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffffff", "eip98Transition": "0x7fffffffffffffff",
"eip86Transition": "0x7fffffffffffffff", "eip86Transition": "0x7fffffffffffffff",
"eip155Transition": "0x7fffffffffffffff" "eip155Transition": "0x7fffffffffffffff",
"maxCodeSize": 24576
}, },
"genesis": { "genesis": {
"seal": { "seal": {


@ -6,18 +6,17 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x0", "homesteadTransition": "0x0",
"eip150Transition": "0x0", "eip150Transition": "0x0",
"eip160Transition": "0x0", "eip160Transition": "0x0",
"eip161abcTransition": "0x0", "eip161abcTransition": "0x0",
"eip161dTransition": "0x0", "eip161dTransition": "0x0"
"maxCodeSize": 24576
} }
} }
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
@ -25,7 +24,8 @@
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffffff", "eip98Transition": "0x7fffffffffffffff",
"eip86Transition": "0x7fffffffffffffff", "eip86Transition": "0x7fffffffffffffff",
"eip155Transition": "0x7fffffffffffffff" "eip155Transition": "0x7fffffffffffffff",
"maxCodeSize": 24576
}, },
"genesis": { "genesis": {
"seal": { "seal": {


@ -8,6 +8,7 @@
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"difficultyIncrementDivisor": "60", "difficultyIncrementDivisor": "60",
"durationLimit": "0x3C", "durationLimit": "0x3C",
"blockReward": "0x6f05b59d3b200000",
"homesteadTransition": "0x30d40", "homesteadTransition": "0x30d40",
"difficultyHardforkTransition": "0x59d9", "difficultyHardforkTransition": "0x59d9",
"difficultyHardforkBoundDivisor": "0x0200", "difficultyHardforkBoundDivisor": "0x0200",
@ -21,7 +22,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x6f05b59d3b200000",
"registrar" : "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21", "registrar" : "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -7,6 +7,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x118c30", "homesteadTransition": "0x118c30",
"daoHardforkTransition": "0x1d4c00", "daoHardforkTransition": "0x1d4c00",
"daoHardforkBeneficiary": "0xbf4ed7b27f1d666546e30d74d50d173d20bca754", "daoHardforkBeneficiary": "0xbf4ed7b27f1d666546e30d74d50d173d20bca754",
@ -131,14 +132,12 @@
"eip150Transition": "0x259518", "eip150Transition": "0x259518",
"eip160Transition": 2675000, "eip160Transition": 2675000,
"eip161abcTransition": 2675000, "eip161abcTransition": 2675000,
"eip161dTransition": 2675000, "eip161dTransition": 2675000
"maxCodeSize": 24576
} }
} }
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xe3389675d0338462dC76C6f9A3e432550c36A142", "registrar" : "0xe3389675d0338462dC76C6f9A3e432550c36A142",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
@ -149,7 +148,8 @@
"eip155Transition": 2675000, "eip155Transition": 2675000,
"eip98Transition": "0x7fffffffffffff", "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff" "eip86Transition": "0x7fffffffffffff",
"maxCodeSize": 24576
}, },
"genesis": { "genesis": {
"seal": { "seal": {


@ -6,6 +6,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x118c30", "homesteadTransition": "0x118c30",
"daoHardforkTransition": "0x1d4c00", "daoHardforkTransition": "0x1d4c00",
"daoHardforkBeneficiary": "0xbf4ed7b27f1d666546e30d74d50d173d20bca754", "daoHardforkBeneficiary": "0xbf4ed7b27f1d666546e30d74d50d173d20bca754",
@ -136,7 +137,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -6,6 +6,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x7fffffffffffffff", "homesteadTransition": "0x7fffffffffffffff",
"eip150Transition": "0x7fffffffffffffff", "eip150Transition": "0x7fffffffffffffff",
"eip160Transition": "0x7fffffffffffffff", "eip160Transition": "0x7fffffffffffffff",
@ -16,7 +17,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -6,6 +6,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0x0", "homesteadTransition": "0x0",
"eip150Transition": "0x7fffffffffffffff", "eip150Transition": "0x7fffffffffffffff",
"eip160Transition": "0x7fffffffffffffff", "eip160Transition": "0x7fffffffffffffff",
@ -16,7 +17,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -5,6 +5,7 @@
"authorityRound": { "authorityRound": {
"params": { "params": {
"stepDuration": "4", "stepDuration": "4",
"blockReward": "0x4563918244F40000",
"validators" : { "validators" : {
"list": [ "list": [
"0x00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED", "0x00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED",
@ -29,7 +30,6 @@
"params": { "params": {
"gasLimitBoundDivisor": "0x400", "gasLimitBoundDivisor": "0x400",
"registrar" : "0xfAb104398BBefbd47752E7702D9fE23047E1Bca3", "registrar" : "0xfAb104398BBefbd47752E7702D9fE23047E1Bca3",
"blockReward": "0x4563918244F40000",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x2A", "networkID" : "0x2A",


@ -7,6 +7,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": 494000, "homesteadTransition": 494000,
"eip150Transition": 1783000, "eip150Transition": 1783000,
"eip160Transition": 1915000, "eip160Transition": 1915000,
@ -20,7 +21,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar": "0x52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d", "registrar": "0x52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d",
"accountStartNonce": "0x0100000", "accountStartNonce": "0x0100000",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",


@ -6,6 +6,7 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x08", "durationLimit": "0x08",
"blockReward": "0x14D1120D7B160000",
"homesteadTransition": "0x7fffffffffffffff", "homesteadTransition": "0x7fffffffffffffff",
"eip150Transition": "0x7fffffffffffffff", "eip150Transition": "0x7fffffffffffffff",
"eip160Transition": "0x7fffffffffffffff", "eip160Transition": "0x7fffffffffffffff",
@ -16,7 +17,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x14D1120D7B160000",
"registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050", "registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x0400", "maximumExtraDataSize": "0x0400",


@ -7,12 +7,12 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": 0, "homesteadTransition": 0,
"eip150Transition": 0, "eip150Transition": 0,
"eip160Transition": 10, "eip160Transition": 10,
"eip161abcTransition": 10, "eip161abcTransition": 10,
"eip161dTransition": 10, "eip161dTransition": 10,
"maxCodeSize": 24576,
"eip649Reward": "0x29A2241AF62C0000", "eip649Reward": "0x29A2241AF62C0000",
"eip100bTransition": 1700000, "eip100bTransition": 1700000,
"eip649Transition": 1700000 "eip649Transition": 1700000
@ -21,7 +21,6 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar": "0x81a4b044831c4f12ba601adb9274516939e9b8a2", "registrar": "0x81a4b044831c4f12ba601adb9274516939e9b8a2",
"accountStartNonce": "0x0", "accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
@ -29,6 +28,7 @@
"networkID" : "0x3", "networkID" : "0x3",
"forkBlock": 641350, "forkBlock": 641350,
"forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb", "forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb",
"maxCodeSize": 24576,
"eip155Transition": 10, "eip155Transition": 10,
"eip98Transition": "0x7fffffffffffff", "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff", "eip86Transition": "0x7fffffffffffff",


@ -6,12 +6,12 @@
"minimumDifficulty": "0x020000", "minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800", "difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d", "durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"homesteadTransition": "0", "homesteadTransition": "0",
"eip150Transition": "0", "eip150Transition": "0",
"eip160Transition": "0", "eip160Transition": "0",
"eip161abcTransition": "0", "eip161abcTransition": "0",
"eip161dTransition": "0", "eip161dTransition": "0",
"maxCodeSize": 24576,
"eip649Reward": "0x29A2241AF62C0000", "eip649Reward": "0x29A2241AF62C0000",
"eip100bTransition": "5", "eip100bTransition": "5",
"eip649Transition": "5" "eip649Transition": "5"
@ -20,12 +20,12 @@
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"maxCodeSize": 24576,
"eip98Transition": "5", "eip98Transition": "5",
"eip140Transition": "5", "eip140Transition": "5",
"eip211Transition": "5", "eip211Transition": "5",


@ -1,7 +1,9 @@
{ {
"name": "Morden", "name": "Morden",
"engine": { "engine": {
"null": null "null": {
"params": {}
}
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",


@ -1,7 +1,9 @@
{ {
"name": "Morden", "name": "Morden",
"engine": { "engine": {
"null": null "null": {
"params": {}
}
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",


@ -1,15 +1,18 @@
{ {
"name": "Morden", "name": "Morden",
"engine": { "engine": {
"null": null "null": {
"params": {
"blockReward": "0x4563918244F40000"
}
}
}, },
"params": { "params": {
"gasLimitBoundDivisor": "0x0400", "gasLimitBoundDivisor": "0x0400",
"accountStartNonce": "0x0", "accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x2", "networkID" : "0x2"
"blockReward": "0x4563918244F40000"
}, },
"genesis": { "genesis": {
"seal": { "seal": {


@ -31,7 +31,7 @@ use unexpected::{Mismatch, OutOfBounds};
use basic_types::{LogBloom, Seal}; use basic_types::{LogBloom, Seal};
use vm::{EnvInfo, LastHashes}; use vm::{EnvInfo, LastHashes};
use engines::Engine; use engines::EthEngine;
use error::{Error, BlockError, TransactionError}; use error::{Error, BlockError, TransactionError};
use factory::Factories; use factory::Factories;
use header::Header; use header::Header;
@ -97,6 +97,7 @@ pub struct ExecutedBlock {
transactions_set: HashSet<H256>, transactions_set: HashSet<H256>,
state: State<StateDB>, state: State<StateDB>,
traces: Option<Vec<Vec<FlatTrace>>>, traces: Option<Vec<Vec<FlatTrace>>>,
last_hashes: Arc<LastHashes>,
} }
/// A set of references to `ExecutedBlock` fields that are publicly accessible. /// A set of references to `ExecutedBlock` fields that are publicly accessible.
@ -115,6 +116,17 @@ pub struct BlockRefMut<'a> {
pub traces: &'a mut Option<Vec<Vec<FlatTrace>>>, pub traces: &'a mut Option<Vec<Vec<FlatTrace>>>,
} }
impl<'a> BlockRefMut<'a> {
/// Add traces if tracing is enabled.
pub fn push_traces(&mut self, tracer: ::trace::ExecutiveTracer) {
use trace::Tracer;
if let Some(ref mut traces) = self.traces.as_mut() {
traces.push(tracer.drain())
}
}
}
/// A set of immutable references to `ExecutedBlock` fields that are publicly accessible. /// A set of immutable references to `ExecutedBlock` fields that are publicly accessible.
pub struct BlockRef<'a> { pub struct BlockRef<'a> {
/// Block header. /// Block header.
@ -133,7 +145,7 @@ pub struct BlockRef<'a> {
impl ExecutedBlock { impl ExecutedBlock {
/// Create a new block from the given `state`. /// Create a new block from the given `state`.
fn new(state: State<StateDB>, tracing: bool) -> ExecutedBlock { fn new(state: State<StateDB>, last_hashes: Arc<LastHashes>, tracing: bool) -> ExecutedBlock {
ExecutedBlock { ExecutedBlock {
header: Default::default(), header: Default::default(),
transactions: Default::default(), transactions: Default::default(),
@ -142,6 +154,7 @@ impl ExecutedBlock {
transactions_set: Default::default(), transactions_set: Default::default(),
state: state, state: state,
traces: if tracing {Some(Vec::new())} else {None}, traces: if tracing {Some(Vec::new())} else {None},
last_hashes: last_hashes,
} }
} }
@ -168,6 +181,20 @@ impl ExecutedBlock {
traces: &self.traces, traces: &self.traces,
} }
} }
/// Get the environment info concerning this block.
pub fn env_info(&self) -> EnvInfo {
// TODO: memoise.
EnvInfo {
number: self.header.number(),
author: self.header.author().clone(),
timestamp: self.header.timestamp(),
difficulty: self.header.difficulty().clone(),
last_hashes: self.last_hashes.clone(),
gas_used: self.receipts.last().map_or(U256::zero(), |r| r.gas_used),
gas_limit: self.header.gas_limit().clone(),
}
}
} }
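`ExecutedBlock` now owns `last_hashes` and derives the execution environment itself (see `env_info` above), so `OpenBlock` later in this file simply delegates. A compressed, runnable mirror of that construction with toy types; the field types are simplified and the `author` field is omitted.

```rust
use std::sync::Arc;

// Toy mirror of ExecutedBlock::env_info: cumulative gas used comes from the last
// receipt, and the shared last-hashes list is cloned out of the block's own Arc.
struct Header {
    number: u64,
    timestamp: u64,
    difficulty: u64,
    gas_limit: u64,
}

struct Receipt {
    gas_used: u64,
}

struct EnvInfo {
    number: u64,
    timestamp: u64,
    difficulty: u64,
    last_hashes: Arc<Vec<[u8; 32]>>,
    gas_used: u64,
    gas_limit: u64,
}

struct ExecutedBlock {
    header: Header,
    receipts: Vec<Receipt>,
    last_hashes: Arc<Vec<[u8; 32]>>,
}

impl ExecutedBlock {
    fn env_info(&self) -> EnvInfo {
        EnvInfo {
            number: self.header.number,
            timestamp: self.header.timestamp,
            difficulty: self.header.difficulty,
            last_hashes: self.last_hashes.clone(),
            // Gas used so far is whatever the most recent receipt reports.
            gas_used: self.receipts.last().map_or(0, |r| r.gas_used),
            gas_limit: self.header.gas_limit,
        }
    }
}

fn main() {
    let block = ExecutedBlock {
        header: Header { number: 1, timestamp: 10, difficulty: 131_072, gas_limit: 8_000_000 },
        receipts: vec![Receipt { gas_used: 21_000 }, Receipt { gas_used: 42_000 }],
        last_hashes: Arc::new(vec![[0u8; 32]]),
    };
    let info = block.env_info();
    assert_eq!(info.number, 1);
    assert_eq!(info.gas_used, 42_000);
    assert_eq!(info.last_hashes.len(), 1);
}
```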
/// Trait for a object that is a `ExecutedBlock`. /// Trait for a object that is a `ExecutedBlock`.
@ -216,14 +243,33 @@ impl IsBlock for ExecutedBlock {
fn block(&self) -> &ExecutedBlock { self } fn block(&self) -> &ExecutedBlock { self }
} }
impl ::parity_machine::LiveBlock for ExecutedBlock {
type Header = Header;
fn header(&self) -> &Header {
&self.header
}
fn uncles(&self) -> &[Header] {
&self.uncles
}
}
impl ::parity_machine::Transactions for ExecutedBlock {
type Transaction = SignedTransaction;
fn transactions(&self) -> &[SignedTransaction] {
&self.transactions
}
}
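These impls plug `ExecutedBlock` into the generic `parity_machine` block abstractions. The sketch below uses simplified trait definitions (the real `parity_machine` traits carry more items) to show what that buys: machine-level code can be written against anything block-shaped, without naming the concrete ethcore types.

```rust
// Simplified stand-ins for the parity_machine abstractions implemented above.
#[derive(Default, Debug)]
struct Header {
    number: u64,
}

#[derive(Debug)]
struct SignedTransaction {
    nonce: u64,
}

trait LiveBlock {
    type Header;
    fn header(&self) -> &Self::Header;
    fn uncles(&self) -> &[Self::Header];
}

trait Transactions: LiveBlock {
    type Transaction;
    fn transactions(&self) -> &[Self::Transaction];
}

#[derive(Default)]
struct ExecutedBlock {
    header: Header,
    uncles: Vec<Header>,
    transactions: Vec<SignedTransaction>,
}

impl LiveBlock for ExecutedBlock {
    type Header = Header;
    fn header(&self) -> &Header { &self.header }
    fn uncles(&self) -> &[Header] { &self.uncles }
}

impl Transactions for ExecutedBlock {
    type Transaction = SignedTransaction;
    fn transactions(&self) -> &[SignedTransaction] { &self.transactions }
}

// Generic machine code can now inspect any block-shaped type.
fn transaction_count<B: Transactions>(block: &B) -> usize {
    block.transactions().len()
}

fn main() {
    let mut block = ExecutedBlock::default();
    block.transactions.push(SignedTransaction { nonce: 0 });
    assert_eq!(transaction_count(&block), 1);
    assert_eq!(block.header().number, 0);
    assert!(block.uncles().is_empty());
}
```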
/// Block that is ready for transactions to be added. /// Block that is ready for transactions to be added.
/// ///
/// It's a bit like a Vec<Transaction>, except that whenever a transaction is pushed, we execute it and /// It's a bit like a Vec<Transaction>, except that whenever a transaction is pushed, we execute it and
/// maintain the system `state()`. We also archive execution receipts in preparation for later block creation. /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation.
pub struct OpenBlock<'x> { pub struct OpenBlock<'x> {
block: ExecutedBlock, block: ExecutedBlock,
engine: &'x Engine, engine: &'x EthEngine,
last_hashes: Arc<LastHashes>,
} }
/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
@ -234,7 +280,6 @@ pub struct OpenBlock<'x> {
pub struct ClosedBlock { pub struct ClosedBlock {
block: ExecutedBlock, block: ExecutedBlock,
uncle_bytes: Bytes, uncle_bytes: Bytes,
last_hashes: Arc<LastHashes>,
unclosed_state: State<StateDB>, unclosed_state: State<StateDB>,
} }
@ -259,7 +304,7 @@ impl<'x> OpenBlock<'x> {
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
/// Create a new `OpenBlock` ready for transaction pushing. /// Create a new `OpenBlock` ready for transaction pushing.
pub fn new( pub fn new(
engine: &'x Engine, engine: &'x EthEngine,
factories: Factories, factories: Factories,
tracing: bool, tracing: bool,
db: StateDB, db: StateDB,
@ -273,9 +318,8 @@ impl<'x> OpenBlock<'x> {
let number = parent.number() + 1; let number = parent.number() + 1;
let state = State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(number), factories)?; let state = State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(number), factories)?;
let mut r = OpenBlock { let mut r = OpenBlock {
block: ExecutedBlock::new(state, tracing), block: ExecutedBlock::new(state, last_hashes, tracing),
engine: engine, engine: engine,
last_hashes: last_hashes.clone(),
}; };
r.block.header.set_parent_hash(parent.hash()); r.block.header.set_parent_hash(parent.hash());
@ -287,8 +331,12 @@ impl<'x> OpenBlock<'x> {
let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit); let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit);
let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target); let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target);
engine.populate_from_parent(&mut r.block.header, parent, gas_floor_target, gas_ceil_target);
engine.on_new_block(&mut r.block, last_hashes, is_epoch_begin)?; engine.machine().populate_from_parent(&mut r.block.header, parent, gas_floor_target, gas_ceil_target);
engine.populate_from_parent(&mut r.block.header, parent);
engine.machine().on_new_block(&mut r.block)?;
engine.on_new_block(&mut r.block, is_epoch_begin)?;
Ok(r) Ok(r)
} }
@ -343,16 +391,7 @@ impl<'x> OpenBlock<'x> {
/// Get the environment info concerning this block. /// Get the environment info concerning this block.
pub fn env_info(&self) -> EnvInfo { pub fn env_info(&self) -> EnvInfo {
-		// TODO: memoise.
-		EnvInfo {
-			number: self.block.header.number(),
-			author: self.block.header.author().clone(),
-			timestamp: self.block.header.timestamp(),
-			difficulty: self.block.header.difficulty().clone(),
-			last_hashes: self.last_hashes.clone(),
-			gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
-			gas_limit: self.block.header.gas_limit().clone(),
-		}
+		self.block.env_info()
} }
/// Push a transaction into the block. /// Push a transaction into the block.
@ -365,7 +404,7 @@ impl<'x> OpenBlock<'x> {
let env_info = self.env_info(); let env_info = self.env_info();
// info!("env_info says gas_used={}", env_info.gas_used); // info!("env_info says gas_used={}", env_info.gas_used);
match self.block.state.apply(&env_info, self.engine, &t, self.block.traces.is_some()) { match self.block.state.apply(&env_info, self.engine.machine(), &t, self.block.traces.is_some()) {
Ok(outcome) => { Ok(outcome) => {
self.block.transactions_set.insert(h.unwrap_or_else(||t.hash())); self.block.transactions_set.insert(h.unwrap_or_else(||t.hash()));
self.block.transactions.push(t.into()); self.block.transactions.push(t.into());
@ -418,7 +457,6 @@ impl<'x> OpenBlock<'x> {
ClosedBlock { ClosedBlock {
block: s.block, block: s.block,
uncle_bytes: uncle_bytes, uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes,
unclosed_state: unclosed_state, unclosed_state: unclosed_state,
} }
} }
@ -485,14 +523,13 @@ impl ClosedBlock {
} }
/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
pub fn reopen(self, engine: &Engine) -> OpenBlock { pub fn reopen(self, engine: &EthEngine) -> OpenBlock {
// revert rewards (i.e. set state back at last transaction's state). // revert rewards (i.e. set state back at last transaction's state).
let mut block = self.block; let mut block = self.block;
block.state = self.unclosed_state; block.state = self.unclosed_state;
OpenBlock { OpenBlock {
block: block, block: block,
engine: engine, engine: engine,
last_hashes: self.last_hashes,
} }
} }
} }
@ -504,7 +541,7 @@ impl LockedBlock {
/// Provide a valid seal in order to turn this into a `SealedBlock`. /// Provide a valid seal in order to turn this into a `SealedBlock`.
/// ///
/// NOTE: This does not check the validity of `seal` with the engine. /// NOTE: This does not check the validity of `seal` with the engine.
pub fn seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> { pub fn seal(self, engine: &EthEngine, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> {
let mut s = self; let mut s = self;
if seal.len() != engine.seal_fields() { if seal.len() != engine.seal_fields() {
return Err(BlockError::InvalidSealArity(Mismatch{expected: engine.seal_fields(), found: seal.len()})); return Err(BlockError::InvalidSealArity(Mismatch{expected: engine.seal_fields(), found: seal.len()}));
@ -518,12 +555,14 @@ impl LockedBlock {
/// Returns the `ClosedBlock` back again if the seal is no good. /// Returns the `ClosedBlock` back again if the seal is no good.
pub fn try_seal( pub fn try_seal(
self, self,
engine: &Engine, engine: &EthEngine,
seal: Vec<Bytes>, seal: Vec<Bytes>,
) -> Result<SealedBlock, (Error, LockedBlock)> { ) -> Result<SealedBlock, (Error, LockedBlock)> {
let mut s = self; let mut s = self;
s.block.header.set_seal(seal); s.block.header.set_seal(seal);
match engine.verify_block_seal(&s.block.header) {
// TODO: passing state context to avoid engines owning it?
match engine.verify_local_seal(&s.block.header) {
Err(e) => Err((e, s)), Err(e) => Err((e, s)),
_ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }), _ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
} }
@ -575,7 +614,7 @@ pub fn enact(
header: &Header, header: &Header,
transactions: &[SignedTransaction], transactions: &[SignedTransaction],
uncles: &[Header], uncles: &[Header],
engine: &Engine, engine: &EthEngine,
tracing: bool, tracing: bool,
db: StateDB, db: StateDB,
parent: &Header, parent: &Header,
@ -647,7 +686,7 @@ fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction])
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact_verified( pub fn enact_verified(
block: &PreverifiedBlock, block: &PreverifiedBlock,
engine: &Engine, engine: &EthEngine,
tracing: bool, tracing: bool,
db: StateDB, db: StateDB,
parent: &Header, parent: &Header,
@ -675,7 +714,7 @@ pub fn enact_verified(
mod tests { mod tests {
use tests::helpers::*; use tests::helpers::*;
use super::*; use super::*;
use engines::Engine; use engines::EthEngine;
use vm::LastHashes; use vm::LastHashes;
use error::Error; use error::Error;
use header::Header; use header::Header;
@ -690,7 +729,7 @@ mod tests {
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
fn enact_bytes( fn enact_bytes(
block_bytes: &[u8], block_bytes: &[u8],
engine: &Engine, engine: &EthEngine,
tracing: bool, tracing: bool,
db: StateDB, db: StateDB,
parent: &Header, parent: &Header,
@ -737,7 +776,7 @@ mod tests {
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
fn enact_and_seal( fn enact_and_seal(
block_bytes: &[u8], block_bytes: &[u8],
engine: &Engine, engine: &EthEngine,
tracing: bool, tracing: bool,
db: StateDB, db: StateDB,
parent: &Header, parent: &Header,


@ -19,8 +19,9 @@
use std::sync::Arc; use std::sync::Arc;
use blockchain::BlockChain; use blockchain::BlockChain;
use engines::{Engine, EpochVerifier}; use engines::{EthEngine, EpochVerifier};
use header::Header; use header::Header;
use machine::EthereumMachine;
use rand::Rng; use rand::Rng;
use parking_lot::RwLock; use parking_lot::RwLock;
@ -31,13 +32,13 @@ const HEAVY_VERIFY_RATE: f32 = 0.02;
/// Ancient block verifier: import an ancient sequence of blocks in order from a starting /// Ancient block verifier: import an ancient sequence of blocks in order from a starting
/// epoch. /// epoch.
pub struct AncientVerifier { pub struct AncientVerifier {
cur_verifier: RwLock<Box<EpochVerifier>>, cur_verifier: RwLock<Box<EpochVerifier<EthereumMachine>>>,
engine: Arc<Engine>, engine: Arc<EthEngine>,
} }
impl AncientVerifier { impl AncientVerifier {
/// Create a new ancient block verifier with the given engine and initial verifier. /// Create a new ancient block verifier with the given engine and initial verifier.
pub fn new(engine: Arc<Engine>, start_verifier: Box<EpochVerifier>) -> Self { pub fn new(engine: Arc<EthEngine>, start_verifier: Box<EpochVerifier<EthereumMachine>>) -> Self {
AncientVerifier { AncientVerifier {
cur_verifier: RwLock::new(start_verifier), cur_verifier: RwLock::new(start_verifier),
engine: engine, engine: engine,


@ -46,7 +46,7 @@ use client::{
ChainNotify, PruningInfo, ProvingBlockChainClient, ChainNotify, PruningInfo, ProvingBlockChainClient,
}; };
use encoded; use encoded;
use engines::{Engine, EpochTransition}; use engines::{EthEngine, EpochTransition};
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use vm::{EnvInfo, LastHashes}; use vm::{EnvInfo, LastHashes};
use evm::{Factory as EvmFactory, Schedule}; use evm::{Factory as EvmFactory, Schedule};
@ -147,7 +147,7 @@ pub struct Client {
mode: Mutex<Mode>, mode: Mutex<Mode>,
chain: RwLock<Arc<BlockChain>>, chain: RwLock<Arc<BlockChain>>,
tracedb: RwLock<TraceDB<BlockChain>>, tracedb: RwLock<TraceDB<BlockChain>>,
engine: Arc<Engine>, engine: Arc<EthEngine>,
config: ClientConfig, config: ClientConfig,
pruning: journaldb::Algorithm, pruning: journaldb::Algorithm,
db: RwLock<Arc<KeyValueDB>>, db: RwLock<Arc<KeyValueDB>>,
@ -332,7 +332,7 @@ impl Client {
} }
/// Returns engine reference. /// Returns engine reference.
pub fn engine(&self) -> &Engine { pub fn engine(&self) -> &EthEngine {
&*self.engine &*self.engine
} }
@ -421,22 +421,34 @@ impl Client {
			return Err(());
		}
+		// Check if parent is in chain
+		let parent = match chain.block_header(header.parent_hash()) {
+			Some(h) => h,
+			None => {
+				warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
+				return Err(());
+			}
+		};
		// Verify Block Family
-		let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &**chain);
+		let verify_family_result = self.verifier.verify_block_family(
+			header,
+			&parent,
+			engine,
+			Some((&block.bytes, &block.transactions, &**chain, self)),
+		);
		if let Err(e) = verify_family_result {
			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
			return Err(());
		};
-		let verify_external_result = self.verifier.verify_block_external(header, &block.bytes, engine);
+		let verify_external_result = self.verifier.verify_block_external(header, engine);
		if let Err(e) = verify_external_result {
			warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
			return Err(());
		};
-		// Check if Parent is in chain
-		let chain_has_parent = chain.block_header(header.parent_hash());
-		if let Some(parent) = chain_has_parent {
		// Enact Verified Block
		let last_hashes = self.build_last_hashes(header.parent_hash().clone());
		let db = self.state_db.lock().boxed_clone_canon(header.parent_hash());
@ -466,10 +478,6 @@ impl Client {
			}
			Ok(locked_block)
-		} else {
-			warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
-			Err(())
-		}
	}
fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) { fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
@ -721,7 +729,12 @@ impl Client {
use engines::EpochChange; use engines::EpochChange;
let hash = header.hash(); let hash = header.hash();
-		match self.engine.signals_epoch_end(header, Some(block_bytes), Some(&receipts)) {
+		let auxiliary = ::machine::AuxiliaryData {
+			bytes: Some(block_bytes),
+			receipts: Some(&receipts),
+		};
+		match self.engine.signals_epoch_end(header, auxiliary) {
EpochChange::Yes(proof) => { EpochChange::Yes(proof) => {
use engines::epoch::PendingTransition; use engines::epoch::PendingTransition;
use engines::Proof; use engines::Proof;
@ -754,7 +767,7 @@ impl Client {
).expect("state known to be available for just-imported block; qed"); ).expect("state known to be available for just-imported block; qed");
let options = TransactOptions::with_no_tracing().dont_check_nonce(); let options = TransactOptions::with_no_tracing().dont_check_nonce();
let res = Executive::new(&mut state, &env_info, &*self.engine) let res = Executive::new(&mut state, &env_info, self.engine.machine())
.transact(&transaction, options); .transact(&transaction, options);
let res = match res { let res = match res {
@ -821,7 +834,7 @@ impl Client {
// use a state-proving closure for the given block. // use a state-proving closure for the given block.
fn with_proving_caller<F, T>(&self, id: BlockId, with_call: F) -> T fn with_proving_caller<F, T>(&self, id: BlockId, with_call: F) -> T
where F: FnOnce(&::engines::Call) -> T where F: FnOnce(&::machine::Call) -> T
{ {
let call = |a, d| { let call = |a, d| {
let tx = self.contract_call_tx(id, a, d); let tx = self.contract_call_tx(id, a, d);
@ -1119,15 +1132,14 @@ impl Client {
} }
fn do_virtual_call(&self, env_info: &EnvInfo, state: &mut State<StateDB>, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError> { fn do_virtual_call(&self, env_info: &EnvInfo, state: &mut State<StateDB>, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError> {
fn call<E, V, T>( fn call<V, T>(
state: &mut State<StateDB>, state: &mut State<StateDB>,
env_info: &EnvInfo, env_info: &EnvInfo,
engine: &E, machine: &::machine::EthereumMachine,
state_diff: bool, state_diff: bool,
transaction: &SignedTransaction, transaction: &SignedTransaction,
options: TransactOptions<T, V>, options: TransactOptions<T, V>,
) -> Result<Executed, CallError> where ) -> Result<Executed, CallError> where
E: Engine + ?Sized,
T: trace::Tracer, T: trace::Tracer,
V: trace::VMTracer, V: trace::VMTracer,
{ {
@ -1136,7 +1148,7 @@ impl Client {
.save_output_from_contract(); .save_output_from_contract();
let original_state = if state_diff { Some(state.clone()) } else { None }; let original_state = if state_diff { Some(state.clone()) } else { None };
let mut ret = Executive::new(state, env_info, engine).transact_virtual(transaction, options)?; let mut ret = Executive::new(state, env_info, machine).transact_virtual(transaction, options)?;
if let Some(original) = original_state { if let Some(original) = original_state {
ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?); ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?);
@ -1145,13 +1157,13 @@ impl Client {
} }
let state_diff = analytics.state_diffing; let state_diff = analytics.state_diffing;
let engine = &*self.engine; let machine = self.engine.machine();
match (analytics.transaction_tracing, analytics.vm_tracing) { match (analytics.transaction_tracing, analytics.vm_tracing) {
(true, true) => call(state, env_info, engine, state_diff, t, TransactOptions::with_tracing_and_vm_tracing()), (true, true) => call(state, env_info, machine, state_diff, t, TransactOptions::with_tracing_and_vm_tracing()),
(true, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_tracing()), (true, false) => call(state, env_info, machine, state_diff, t, TransactOptions::with_tracing()),
(false, true) => call(state, env_info, engine, state_diff, t, TransactOptions::with_vm_tracing()), (false, true) => call(state, env_info, machine, state_diff, t, TransactOptions::with_vm_tracing()),
(false, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_no_tracing()), (false, false) => call(state, env_info, machine, state_diff, t, TransactOptions::with_no_tracing()),
} }
} }
@ -1235,7 +1247,7 @@ impl BlockChainClient for Client {
let tx = tx.fake_sign(sender); let tx = tx.fake_sign(sender);
let mut state = original_state.clone(); let mut state = original_state.clone();
Ok(Executive::new(&mut state, &env_info, &*self.engine) Ok(Executive::new(&mut state, &env_info, self.engine.machine())
.transact_virtual(&tx, options()) .transact_virtual(&tx, options())
.map(|r| r.exception.is_none()) .map(|r| r.exception.is_none())
.unwrap_or(false)) .unwrap_or(false))
@ -1296,7 +1308,7 @@ impl BlockChainClient for Client {
let rest = txs.split_off(address.index); let rest = txs.split_off(address.index);
for t in txs { for t in txs {
let t = SignedTransaction::new(t).expect(PROOF); let t = SignedTransaction::new(t).expect(PROOF);
let x = Executive::new(&mut state, &env_info, &*self.engine).transact(&t, TransactOptions::with_no_tracing())?; let x = Executive::new(&mut state, &env_info, self.engine.machine()).transact(&t, TransactOptions::with_no_tracing())?;
env_info.gas_used = env_info.gas_used + x.gas_used; env_info.gas_used = env_info.gas_used + x.gas_used;
} }
let first = rest.into_iter().next().expect("We split off < `address.index`; Length is checked earlier; qed"); let first = rest.into_iter().next().expect("We split off < `address.index`; Length is checked earlier; qed");
@ -1575,7 +1587,7 @@ impl BlockChainClient for Client {
.collect(); .collect();
match (transaction, previous_receipts) { match (transaction, previous_receipts) {
(Some(transaction), Some(previous_receipts)) => { (Some(transaction), Some(previous_receipts)) => {
Some(transaction_receipt(self.engine(), transaction, previous_receipts)) Some(transaction_receipt(self.engine().machine(), transaction, previous_receipts))
}, },
_ => None, _ => None,
} }
@ -1680,14 +1692,22 @@ impl BlockChainClient for Client {
match (start, end) { match (start, end) {
(Some(s), Some(e)) => { (Some(s), Some(e)) => {
let filter = trace::Filter { let db_filter = trace::Filter {
range: s as usize..e as usize, range: s as usize..e as usize,
from_address: From::from(filter.from_address), from_address: From::from(filter.from_address),
to_address: From::from(filter.to_address), to_address: From::from(filter.to_address),
}; };
let traces = self.tracedb.read().filter(&filter); let traces = self.tracedb.read().filter(&db_filter);
Some(traces) if traces.is_empty() {
return Some(vec![]);
}
let traces_iter = traces.into_iter().skip(filter.after.unwrap_or(0));
Some(match filter.count {
Some(count) => traces_iter.take(count).collect(),
None => traces_iter.collect(),
})
}, },
_ => None, _ => None,
} }
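The replacement above pages trace results in memory: everything up to `filter.after` is skipped, then at most `filter.count` entries are kept. A minimal self-contained sketch of the same skip/take pattern, with a plain Vec<T> standing in for the trace list:

    fn page<T>(items: Vec<T>, after: Option<usize>, count: Option<usize>) -> Vec<T> {
        // Drop the first `after` entries, then keep at most `count` of the rest.
        let iter = items.into_iter().skip(after.unwrap_or(0));
        match count {
            Some(count) => iter.take(count).collect(),
            None => iter.collect(),
        }
    }

    // page(vec![1, 2, 3, 4, 5], Some(1), Some(2)) == vec![2, 3]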
@ -1995,7 +2015,7 @@ impl ProvingBlockChainClient for Client {
jdb.as_hashdb_mut(), jdb.as_hashdb_mut(),
header.state_root().clone(), header.state_root().clone(),
&transaction, &transaction,
&*self.engine, self.engine.machine(),
&env_info, &env_info,
self.factories.clone(), self.factories.clone(),
false, false,
@ -2018,7 +2038,7 @@ impl Drop for Client {
/// Returns `LocalizedReceipt` given `LocalizedTransaction` /// Returns `LocalizedReceipt` given `LocalizedTransaction`
/// and a vector of receipts from given block up to transaction index. /// and a vector of receipts from given block up to transaction index.
fn transaction_receipt(engine: &Engine, mut tx: LocalizedTransaction, mut receipts: Vec<Receipt>) -> LocalizedReceipt { fn transaction_receipt(machine: &::machine::EthereumMachine, mut tx: LocalizedTransaction, mut receipts: Vec<Receipt>) -> LocalizedReceipt {
assert_eq!(receipts.len(), tx.transaction_index + 1, "All previous receipts are provided."); assert_eq!(receipts.len(), tx.transaction_index + 1, "All previous receipts are provided.");
let sender = tx.sender(); let sender = tx.sender();
@ -2042,7 +2062,7 @@ fn transaction_receipt(engine: &Engine, mut tx: LocalizedTransaction, mut receip
gas_used: receipt.gas_used - prior_gas_used, gas_used: receipt.gas_used - prior_gas_used,
contract_address: match tx.action { contract_address: match tx.action {
Action::Call(_) => None, Action::Call(_) => None,
Action::Create => Some(contract_address(engine.create_address_scheme(block_number), &sender, &tx.nonce, &tx.data).0) Action::Create => Some(contract_address(machine.create_address_scheme(block_number), &sender, &tx.nonce, &tx.data).0)
}, },
logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry {
entry: log, entry: log,
@ -2102,12 +2122,11 @@ mod tests {
use log_entry::{LogEntry, LocalizedLogEntry}; use log_entry::{LogEntry, LocalizedLogEntry};
use receipt::{Receipt, LocalizedReceipt, TransactionOutcome}; use receipt::{Receipt, LocalizedReceipt, TransactionOutcome};
use transaction::{Transaction, LocalizedTransaction, Action}; use transaction::{Transaction, LocalizedTransaction, Action};
use tests::helpers::TestEngine;
// given // given
let key = KeyPair::from_secret_slice(&keccak("test")).unwrap(); let key = KeyPair::from_secret_slice(&keccak("test")).unwrap();
let secret = key.secret(); let secret = key.secret();
let engine = TestEngine::new(0); let machine = ::ethereum::new_frontier_test_machine();
let block_number = 1; let block_number = 1;
let block_hash = 5.into(); let block_hash = 5.into();
@ -2151,7 +2170,7 @@ mod tests {
}]; }];
// when // when
let receipt = transaction_receipt(&engine, transaction, receipts); let receipt = transaction_receipt(&machine, transaction, receipts);
// then // then
assert_eq!(receipt, LocalizedReceipt { assert_eq!(receipt, LocalizedReceipt {

View File

@ -181,7 +181,7 @@ impl<'a> EvmTestClient<'a> {
let mut substate = state::Substate::new(); let mut substate = state::Substate::new();
let mut tracer = trace::NoopTracer; let mut tracer = trace::NoopTracer;
let mut output = vec![]; let mut output = vec![];
let mut executive = executive::Executive::new(&mut self.state, &info, &*self.spec.engine); let mut executive = executive::Executive::new(&mut self.state, &info, self.spec.engine.machine());
executive.call( executive.call(
params, params,
&mut substate, &mut substate,
@ -211,7 +211,7 @@ impl<'a> EvmTestClient<'a> {
// Apply transaction // Apply transaction
let tracer = trace::NoopTracer; let tracer = trace::NoopTracer;
let result = self.state.apply_with_tracing(&env_info, &*self.spec.engine, &transaction, tracer, vm_tracer); let result = self.state.apply_with_tracing(&env_info, self.spec.engine.machine(), &transaction, tracer, vm_tracer);
match result { match result {
Ok(result) => { Ok(result) => {

View File

@ -19,19 +19,16 @@
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::sync::{Weak, Arc}; use std::sync::{Weak, Arc};
use std::time::{UNIX_EPOCH, Duration}; use std::time::{UNIX_EPOCH, Duration};
use std::collections::{BTreeMap, HashSet, HashMap}; use std::collections::{BTreeMap, HashSet};
use std::cmp;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use builtin::Builtin;
use client::EngineClient; use client::EngineClient;
use engines::{Call, Engine, Seal, EngineError, ConstructedVerifier}; use engines::{Engine, Seal, EngineError, ConstructedVerifier};
use error::{Error, TransactionError, BlockError}; use error::{Error, BlockError};
use ethjson; use ethjson;
use machine::{AuxiliaryData, Call, EthereumMachine};
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use spec::CommonParams;
use transaction::UnverifiedTransaction;
use super::signer::EngineSigner; use super::signer::EngineSigner;
use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
@ -66,6 +63,8 @@ pub struct AuthorityRoundParams {
pub validate_step_transition: u64, pub validate_step_transition: u64,
/// Immediate transitions. /// Immediate transitions.
pub immediate_transitions: bool, pub immediate_transitions: bool,
/// Block reward in base units.
pub block_reward: U256,
} }
impl From<ethjson::spec::AuthorityRoundParams> for AuthorityRoundParams { impl From<ethjson::spec::AuthorityRoundParams> for AuthorityRoundParams {
@ -77,6 +76,7 @@ impl From<ethjson::spec::AuthorityRoundParams> for AuthorityRoundParams {
validate_score_transition: p.validate_score_transition.map_or(0, Into::into), validate_score_transition: p.validate_score_transition.map_or(0, Into::into),
validate_step_transition: p.validate_step_transition.map_or(0, Into::into), validate_step_transition: p.validate_step_transition.map_or(0, Into::into),
immediate_transitions: p.immediate_transitions.unwrap_or(false), immediate_transitions: p.immediate_transitions.unwrap_or(false),
block_reward: p.block_reward.map_or_else(Default::default, Into::into),
} }
} }
} }
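`map_or_else(Default::default, Into::into)` above means a spec with no block reward configured falls back to zero. A self-contained sketch of the same defaulting with a plain integer in place of `U256`:

    fn block_reward_from_spec(value: Option<u64>) -> u64 {
        // None -> the type's Default (0); Some(x) -> x converted via Into.
        value.map_or_else(Default::default, Into::into)
    }

    // block_reward_from_spec(None) == 0; block_reward_from_spec(Some(5)) == 5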
@ -138,7 +138,7 @@ impl EpochManager {
} }
// zoom to epoch for given header. returns true if succeeded, false otherwise. // zoom to epoch for given header. returns true if succeeded, false otherwise.
fn zoom_to(&mut self, client: &EngineClient, engine: &Engine, validators: &ValidatorSet, header: &Header) -> bool { fn zoom_to(&mut self, client: &EngineClient, machine: &EthereumMachine, validators: &ValidatorSet, header: &Header) -> bool {
let last_was_parent = self.finality_checker.subchain_head() == Some(header.parent_hash().clone()); let last_was_parent = self.finality_checker.subchain_head() == Some(header.parent_hash().clone());
// early exit for current target == chain head, but only if the epochs are // early exit for current target == chain head, but only if the epochs are
@ -164,7 +164,6 @@ impl EpochManager {
} }
}; };
// extract other epoch set if it's not the same as the last. // extract other epoch set if it's not the same as the last.
if last_transition.block_hash != self.epoch_transition_hash { if last_transition.block_hash != self.epoch_transition_hash {
let (signal_number, set_proof, _) = destructure_proofs(&last_transition.proof) let (signal_number, set_proof, _) = destructure_proofs(&last_transition.proof)
@ -176,7 +175,7 @@ impl EpochManager {
let first = signal_number == 0; let first = signal_number == 0;
let epoch_set = validators.epoch_set( let epoch_set = validators.epoch_set(
first, first,
engine, machine,
signal_number, // use signal number so multi-set first calculation is correct. signal_number, // use signal number so multi-set first calculation is correct.
set_proof, set_proof,
) )
@ -208,8 +207,6 @@ impl EpochManager {
/// Engine using `AuthorityRound` proof-of-authority BFT consensus. /// Engine using `AuthorityRound` proof-of-authority BFT consensus.
pub struct AuthorityRound { pub struct AuthorityRound {
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
transition_service: IoService<()>, transition_service: IoService<()>,
step: Arc<Step>, step: Arc<Step>,
can_propose: AtomicBool, can_propose: AtomicBool,
@ -220,6 +217,8 @@ pub struct AuthorityRound {
validate_step_transition: u64, validate_step_transition: u64,
epoch_manager: Mutex<EpochManager>, epoch_manager: Mutex<EpochManager>,
immediate_transitions: bool, immediate_transitions: bool,
block_reward: U256,
machine: EthereumMachine,
} }
// header-chain validator. // header-chain validator.
@ -228,7 +227,7 @@ struct EpochVerifier {
subchain_validators: SimpleList, subchain_validators: SimpleList,
} }
impl super::EpochVerifier for EpochVerifier { impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
fn verify_light(&self, header: &Header) -> Result<(), Error> { fn verify_light(&self, header: &Header) -> Result<(), Error> {
// always check the seal since it's fast. // always check the seal since it's fast.
// nothing heavier to do. // nothing heavier to do.
@ -250,7 +249,6 @@ impl super::EpochVerifier for EpochVerifier {
let headers: Vec<Header> = otry!(UntrustedRlp::new(proof).as_list().ok()); let headers: Vec<Header> = otry!(UntrustedRlp::new(proof).as_list().ok());
for header in &headers { for header in &headers {
// ensure all headers have correct number of seal fields so we can `verify_external` // ensure all headers have correct number of seal fields so we can `verify_external`
// without panic. // without panic.
@ -347,13 +345,11 @@ impl AsMillis for Duration {
impl AuthorityRound { impl AuthorityRound {
/// Create a new instance of AuthorityRound engine. /// Create a new instance of AuthorityRound engine.
pub fn new(params: CommonParams, our_params: AuthorityRoundParams, builtins: BTreeMap<Address, Builtin>) -> Result<Arc<Self>, Error> { pub fn new(our_params: AuthorityRoundParams, machine: EthereumMachine) -> Result<Arc<Self>, Error> {
let should_timeout = our_params.start_step.is_none(); let should_timeout = our_params.start_step.is_none();
let initial_step = our_params.start_step.unwrap_or_else(|| (unix_now().as_secs() / our_params.step_duration.as_secs())) as usize; let initial_step = our_params.start_step.unwrap_or_else(|| (unix_now().as_secs() / our_params.step_duration.as_secs())) as usize;
let engine = Arc::new( let engine = Arc::new(
AuthorityRound { AuthorityRound {
params: params,
builtins: builtins,
transition_service: IoService::<()>::start()?, transition_service: IoService::<()>::start()?,
step: Arc::new(Step { step: Arc::new(Step {
inner: AtomicUsize::new(initial_step), inner: AtomicUsize::new(initial_step),
@ -368,6 +364,8 @@ impl AuthorityRound {
validate_step_transition: our_params.validate_step_transition, validate_step_transition: our_params.validate_step_transition,
epoch_manager: Mutex::new(EpochManager::blank()), epoch_manager: Mutex::new(EpochManager::blank()),
immediate_transitions: our_params.immediate_transitions, immediate_transitions: our_params.immediate_transitions,
block_reward: our_params.block_reward,
machine: machine,
}); });
// Do not initialize timeouts for tests. // Do not initialize timeouts for tests.
@ -410,22 +408,16 @@ impl IoHandler<()> for TransitionHandler {
} }
} }
impl Engine for AuthorityRound { impl Engine<EthereumMachine> for AuthorityRound {
fn name(&self) -> &str { "AuthorityRound" } fn name(&self) -> &str { "AuthorityRound" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
fn machine(&self) -> &EthereumMachine { &self.machine }
/// Two fields - consensus step and the corresponding proposer signature. /// Two fields - consensus step and the corresponding proposer signature.
fn seal_fields(&self) -> usize { 2 } fn seal_fields(&self) -> usize { 2 }
fn params(&self) -> &CommonParams { &self.params }
fn additional_params(&self) -> HashMap<String, String> {
hash_map!["registrar".to_owned() => self.params().registrar.hex()]
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins }
fn step(&self) { fn step(&self) {
self.step.increment(); self.step.increment();
self.can_propose.store(true, AtomicOrdering::SeqCst); self.can_propose.store(true, AtomicOrdering::SeqCst);
@ -444,19 +436,10 @@ impl Engine for AuthorityRound {
] ]
} }
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
// Chain scoring: total weight is sqrt(U256::max_value())*height - step // Chain scoring: total weight is sqrt(U256::max_value())*height - step
let new_difficulty = U256::from(U128::max_value()) + header_step(parent).expect("Header has been verified; qed").into() - self.step.load().into(); let new_difficulty = U256::from(U128::max_value()) + header_step(parent).expect("Header has been verified; qed").into() - self.step.load().into();
header.set_difficulty(new_difficulty); header.set_difficulty(new_difficulty);
header.set_gas_limit({
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
});
} }
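The chain-scoring rule kept above weights every block at roughly `U128::max_value()` minus its step gap to the parent, so at equal height the chain whose head was sealed at the earlier step wins. A sketch of the arithmetic with narrow integers (the real code works in `U256`, so its `U128::max_value() + parent_step - step` form cannot overflow):

    // Same value as U128::MAX + parent_step - current_step whenever current_step > parent_step,
    // which the family verification enforces.
    fn aura_difficulty(parent_step: u128, current_step: u128) -> u128 {
        u128::max_value() - (current_step - parent_step)
    }

    fn main() {
        // With the parent at step 10, a head sealed at step 11 outweighs one sealed at step 12.
        assert!(aura_difficulty(10, 11) > aura_difficulty(10, 12));
    }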
fn seals_internally(&self) -> Option<bool> { fn seals_internally(&self) -> Option<bool> {
@ -491,7 +474,7 @@ impl Engine for AuthorityRound {
} }
}; };
if !epoch_manager.zoom_to(&*client, self, &*self.validators, header) { if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, header) {
debug!(target: "engine", "Unable to zoom to epoch."); debug!(target: "engine", "Unable to zoom to epoch.");
return Seal::None; return Seal::None;
} }
@ -518,15 +501,15 @@ impl Engine for AuthorityRound {
Seal::None Seal::None
} }
fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> {
Ok(())
}
fn on_new_block( fn on_new_block(
&self, &self,
block: &mut ExecutedBlock, block: &mut ExecutedBlock,
last_hashes: Arc<::vm::LastHashes>,
epoch_begin: bool, epoch_begin: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let parent_hash = block.fields().header.parent_hash().clone();
::engines::common::push_last_hash(block, last_hashes.clone(), self, &parent_hash)?;
// with immediate transitions, we don't use the epoch mechanism anyway. // with immediate transitions, we don't use the epoch mechanism anyway.
// the genesis is always considered an epoch, but we ignore it intentionally. // the genesis is always considered an epoch, but we ignore it intentionally.
if self.immediate_transitions || !epoch_begin { return Ok(()) } if self.immediate_transitions || !epoch_begin { return Ok(()) }
@ -536,10 +519,8 @@ impl Engine for AuthorityRound {
let first = header.number() == 0; let first = header.number() == 0;
let mut call = |to, data| { let mut call = |to, data| {
let result = ::engines::common::execute_as_system( let result = self.machine.execute_as_system(
block, block,
last_hashes.clone(),
self,
to, to,
U256::max_value(), // unbounded gas? maybe make configurable. U256::max_value(), // unbounded gas? maybe make configurable.
Some(data), Some(data),
@ -553,17 +534,13 @@ impl Engine for AuthorityRound {
/// Apply the block reward on finalisation of the block. /// Apply the block reward on finalisation of the block.
fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
::engines::common::bestow_block_reward(block, self) // TODO: move to "machine::WithBalances" trait.
::engines::common::bestow_block_reward(block, self.block_reward)
} }
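A toy sketch of what bestowing the block reward amounts to on close: credit the configured reward to the block author's balance. The real helper operates on the block's state with a `U256` reward; the map and integer types here are illustrative stand-ins:

    use std::collections::HashMap;

    type Address = [u8; 20];

    // Credit the engine's configured block reward to the author when the block is closed.
    fn bestow_block_reward(balances: &mut HashMap<Address, u128>, author: Address, reward: u128) {
        *balances.entry(author).or_insert(0) += reward;
    }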
/// Check the number of seal fields. /// Check the number of seal fields.
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_basic(&self, header: &Header,) -> Result<(), Error> {
if header.seal().len() != self.seal_fields() { if header.number() >= self.validate_score_transition && *header.difficulty() >= U256::from(U128::max_value()) {
trace!(target: "engine", "verify_block_basic: wrong number of seal fields");
Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: self.seal_fields(), found: header.seal().len() }
)))
} else if header.number() >= self.validate_score_transition && *header.difficulty() >= U256::from(U128::max_value()) {
Err(From::from(BlockError::DifficultyOutOfBounds( Err(From::from(BlockError::DifficultyOutOfBounds(
OutOfBounds { min: None, max: Some(U256::from(U128::max_value())), found: *header.difficulty() } OutOfBounds { min: None, max: Some(U256::from(U128::max_value())), found: *header.difficulty() }
))) )))
@ -572,19 +549,10 @@ impl Engine for AuthorityRound {
} }
} }
fn verify_block_unordered(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
Ok(())
}
/// Do the step and gas limit validation. /// Do the step and gas limit validation.
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> {
let step = header_step(header)?; let step = header_step(header)?;
// Do not calculate difficulty for genesis blocks.
if header.number() == 0 {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
}
let parent_step = header_step(parent)?; let parent_step = header_step(parent)?;
// Ensure header is from the step after parent. // Ensure header is from the step after parent.
@ -595,6 +563,7 @@ impl Engine for AuthorityRound {
self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default()); self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
Err(EngineError::DoubleVote(header.author().clone()))?; Err(EngineError::DoubleVote(header.author().clone()))?;
} }
// Report skipped primaries. // Report skipped primaries.
if let (true, Some(me)) = (step > parent_step + 1, self.signer.read().address()) { if let (true, Some(me)) = (step > parent_step + 1, self.signer.read().address()) {
debug!(target: "engine", "Author {} built block with step gap. current step: {}, parent step: {}", debug!(target: "engine", "Author {} built block with step gap. current step: {}, parent step: {}",
@ -611,17 +580,11 @@ impl Engine for AuthorityRound {
} }
} }
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
Ok(()) Ok(())
} }
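A simplified sketch of the step-ordering rule enforced above: a child block must be sealed at a step strictly after its parent's, and a repeated step counts as a double vote (the real code additionally reports the offending author to the validator set):

    fn check_step_order(step: usize, parent_step: usize) -> Result<(), &'static str> {
        if step == parent_step {
            // The author sealed two blocks in the same step.
            Err("double vote")
        } else if step < parent_step {
            Err("step moved backwards")
        } else {
            Ok(())
        }
    }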
// Check the validators. // Check the validators.
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_external(&self, header: &Header) -> Result<(), Error> {
// fetch correct validator set for current epoch, taking into account // fetch correct validator set for current epoch, taking into account
// finality of previous transitions. // finality of previous transitions.
let active_set; let active_set;
@ -639,7 +602,7 @@ impl Engine for AuthorityRound {
}; };
let mut epoch_manager = self.epoch_manager.lock(); let mut epoch_manager = self.epoch_manager.lock();
if !epoch_manager.zoom_to(&*client, self, &*self.validators, header) { if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, header) {
debug!(target: "engine", "Unable to zoom to epoch."); debug!(target: "engine", "Unable to zoom to epoch.");
return Err(EngineError::RequiresClient.into()) return Err(EngineError::RequiresClient.into())
} }
@ -667,19 +630,19 @@ impl Engine for AuthorityRound {
.map(|set_proof| combine_proofs(0, &set_proof, &[])) .map(|set_proof| combine_proofs(0, &set_proof, &[]))
} }
fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, header: &Header, aux: AuxiliaryData)
-> super::EpochChange -> super::EpochChange<EthereumMachine>
{ {
if self.immediate_transitions { return super::EpochChange::No } if self.immediate_transitions { return super::EpochChange::No }
let first = header.number() == 0; let first = header.number() == 0;
self.validators.signals_epoch_end(first, header, block, receipts) self.validators.signals_epoch_end(first, header, aux)
} }
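The separate `block`/`receipts` options are folded into one `AuxiliaryData` argument; the caller-side construction earlier in this diff (client.rs) has the shape below. Not a standalone unit, just a restatement of that call site; pass `None` for whatever the caller does not have:

    let auxiliary = ::machine::AuxiliaryData {
        bytes: Some(block_bytes),   // raw block bytes, if available
        receipts: Some(&receipts),  // block receipts, if available
    };
    let change = engine.signals_epoch_end(header, auxiliary);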
fn is_epoch_end( fn is_epoch_end(
&self, &self,
chain_head: &Header, chain_head: &Header,
chain: &super::Headers, chain: &super::Headers<Header>,
transition_store: &super::PendingTransitionStore, transition_store: &super::PendingTransitionStore,
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
// epochs only matter if we want to support light clients. // epochs only matter if we want to support light clients.
@ -703,7 +666,7 @@ impl Engine for AuthorityRound {
// find most recently finalized blocks, then check transition store for pending transitions. // find most recently finalized blocks, then check transition store for pending transitions.
let mut epoch_manager = self.epoch_manager.lock(); let mut epoch_manager = self.epoch_manager.lock();
if !epoch_manager.zoom_to(&*client, self, &*self.validators, chain_head) { if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) {
return None; return None;
} }
@ -782,14 +745,14 @@ impl Engine for AuthorityRound {
None None
} }
fn epoch_verifier<'a>(&self, _header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a> { fn epoch_verifier<'a>(&self, _header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a, EthereumMachine> {
let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) { let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) {
Ok(x) => x, Ok(x) => x,
Err(e) => return ConstructedVerifier::Err(e), Err(e) => return ConstructedVerifier::Err(e),
}; };
let first = signal_number == 0; let first = signal_number == 0;
match self.validators.epoch_set(first, self, signal_number, set_proof) { match self.validators.epoch_set(first, &self.machine, signal_number, set_proof) {
Ok((list, finalize)) => { Ok((list, finalize)) => {
let verifier = Box::new(EpochVerifier { let verifier = Box::new(EpochVerifier {
step: self.step.clone(), step: self.step.clone(),
@ -805,18 +768,6 @@ impl Engine for AuthorityRound {
} }
} }
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
t.check_low_s()?;
if let Some(n) = t.chain_id() {
if header.number() >= self.params().eip155_transition && n != self.params().chain_id {
return Err(TransactionError::InvalidChainId.into());
}
}
Ok(())
}
fn register_client(&self, client: Weak<EngineClient>) { fn register_client(&self, client: Weak<EngineClient>) {
*self.client.write() = Some(client.clone()); *self.client.write() = Some(client.clone());
self.validators.register_client(client); self.validators.register_client(client);
@ -847,7 +798,6 @@ mod tests {
use bigint::prelude::U256; use bigint::prelude::U256;
use bigint::hash::H520; use bigint::hash::H520;
use header::Header; use header::Header;
use error::{Error, BlockError};
use rlp::encode; use rlp::encode;
use block::*; use block::*;
use tests::helpers::*; use tests::helpers::*;
@ -872,27 +822,13 @@ mod tests {
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
} }
#[test]
fn verification_fails_on_short_seal() {
let engine = Spec::new_test_round().engine;
let header: Header = Header::default();
let verify_result = engine.verify_block_basic(&header, None);
match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); },
_ => { panic!("Should be error, got Ok"); },
}
}
#[test] #[test]
fn can_do_signature_verification_fail() { fn can_do_signature_verification_fail() {
let engine = Spec::new_test_round().engine; let engine = Spec::new_test_round().engine;
let mut header: Header = Header::default(); let mut header: Header = Header::default();
header.set_seal(vec![encode(&H520::default()).into_vec()]); header.set_seal(vec![encode(&H520::default()).into_vec()]);
let verify_result = engine.verify_block_external(&header, None); let verify_result = engine.verify_block_external(&header);
assert!(verify_result.is_err()); assert!(verify_result.is_err());
} }
@ -946,11 +882,11 @@ mod tests {
// Two validators. // Two validators.
// Spec starts with step 2. // Spec starts with step 2.
header.set_seal(vec![encode(&2usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&2usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header).is_ok());
assert!(engine.verify_block_external(&header, None).is_err()); assert!(engine.verify_block_external(&header).is_err());
header.set_seal(vec![encode(&1usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&1usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header).is_ok());
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_external(&header).is_ok());
} }
#[test] #[test]
@ -972,11 +908,11 @@ mod tests {
// Two validators. // Two validators.
// Spec starts with step 2. // Spec starts with step 2.
header.set_seal(vec![encode(&1usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&1usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header).is_ok());
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_external(&header).is_ok());
header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header).is_ok());
assert!(engine.verify_block_external(&header, None).is_err()); assert!(engine.verify_block_external(&header).is_err());
} }
#[test] #[test]
@ -998,9 +934,9 @@ mod tests {
// Two validators. // Two validators.
// Spec starts with step 2. // Spec starts with step 2.
header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header).is_ok());
header.set_seal(vec![encode(&3usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]); header.set_seal(vec![encode(&3usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_err()); assert!(engine.verify_block_family(&header, &parent_header).is_err());
} }
#[test] #[test]
@ -1013,12 +949,14 @@ mod tests {
validate_score_transition: 0, validate_score_transition: 0,
validate_step_transition: 0, validate_step_transition: 0,
immediate_transitions: true, immediate_transitions: true,
block_reward: Default::default(),
}; };
let aura = { let aura = {
let mut c_params = ::spec::CommonParams::default(); let mut c_params = ::spec::CommonParams::default();
c_params.gas_limit_bound_divisor = 5.into(); c_params.gas_limit_bound_divisor = 5.into();
AuthorityRound::new(c_params, params, Default::default()).unwrap() let machine = ::machine::EthereumMachine::regular(c_params, Default::default());
AuthorityRound::new(params, machine).unwrap()
}; };
let mut parent_header: Header = Header::default(); let mut parent_header: Header = Header::default();
@ -1030,12 +968,12 @@ mod tests {
header.set_seal(vec![encode(&3usize).into_vec()]); header.set_seal(vec![encode(&3usize).into_vec()]);
// Do not report when signer not present. // Do not report when signer not present.
assert!(aura.verify_block_family(&header, &parent_header, None).is_ok()); assert!(aura.verify_block_family(&header, &parent_header).is_ok());
assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0); assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 0);
aura.set_signer(Arc::new(AccountProvider::transient_provider()), Default::default(), Default::default()); aura.set_signer(Arc::new(AccountProvider::transient_provider()), Default::default(), Default::default());
assert!(aura.verify_block_family(&header, &parent_header, None).is_ok()); assert!(aura.verify_block_family(&header, &parent_header).is_ok());
assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 1); assert_eq!(last_benign.load(AtomicOrdering::SeqCst), 1);
} }
} }

View File

@ -17,24 +17,18 @@
//! A blockchain engine that supports a basic, non-BFT proof-of-authority. //! A blockchain engine that supports a basic, non-BFT proof-of-authority.
use std::sync::{Weak, Arc}; use std::sync::{Weak, Arc};
use std::collections::BTreeMap;
use std::cmp;
use bigint::prelude::U256;
use bigint::hash::{H256, H520}; use bigint::hash::{H256, H520};
use parking_lot::RwLock; use parking_lot::RwLock;
use util::*; use util::*;
use unexpected::{Mismatch, OutOfBounds};
use ethkey::{recover, public_to_address, Signature}; use ethkey::{recover, public_to_address, Signature};
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use builtin::Builtin; use engines::{Engine, Seal, ConstructedVerifier, EngineError};
use spec::CommonParams;
use engines::{Engine, Seal, Call, ConstructedVerifier, EngineError};
use error::{BlockError, Error}; use error::{BlockError, Error};
use evm::Schedule;
use ethjson; use ethjson;
use header::{Header, BlockNumber}; use header::Header;
use client::EngineClient; use client::EngineClient;
use machine::{AuxiliaryData, Call, EthereumMachine};
use semantic_version::SemanticVersion; use semantic_version::SemanticVersion;
use super::signer::EngineSigner; use super::signer::EngineSigner;
use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
@ -58,7 +52,7 @@ struct EpochVerifier {
list: SimpleList, list: SimpleList,
} }
impl super::EpochVerifier for EpochVerifier { impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
fn verify_light(&self, header: &Header) -> Result<(), Error> { fn verify_light(&self, header: &Header) -> Result<(), Error> {
verify_external(header, &self.list) verify_external(header, &self.list)
} }
@ -83,53 +77,31 @@ fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Err
/// Engine using `BasicAuthority`, trivial proof-of-authority consensus. /// Engine using `BasicAuthority`, trivial proof-of-authority consensus.
pub struct BasicAuthority { pub struct BasicAuthority {
params: CommonParams, machine: EthereumMachine,
builtins: BTreeMap<Address, Builtin>,
signer: RwLock<EngineSigner>, signer: RwLock<EngineSigner>,
validators: Box<ValidatorSet>, validators: Box<ValidatorSet>,
} }
impl BasicAuthority { impl BasicAuthority {
/// Create a new instance of BasicAuthority engine /// Create a new instance of BasicAuthority engine
pub fn new(params: CommonParams, our_params: BasicAuthorityParams, builtins: BTreeMap<Address, Builtin>) -> Self { pub fn new(our_params: BasicAuthorityParams, machine: EthereumMachine) -> Self {
BasicAuthority { BasicAuthority {
params: params, machine: machine,
builtins: builtins,
validators: new_validator_set(our_params.validators),
signer: Default::default(), signer: Default::default(),
validators: new_validator_set(our_params.validators),
} }
} }
} }
impl Engine for BasicAuthority { impl Engine<EthereumMachine> for BasicAuthority {
fn name(&self) -> &str { "BasicAuthority" } fn name(&self) -> &str { "BasicAuthority" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
fn machine(&self) -> &EthereumMachine { &self.machine }
// One field - the signature // One field - the signature
fn seal_fields(&self) -> usize { 1 } fn seal_fields(&self) -> usize { 1 }
fn params(&self) -> &CommonParams { &self.params }
fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins }
/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> BTreeMap<String, String> { map!["signature".to_owned() => "TODO".to_owned()] }
fn schedule(&self, _block_number: BlockNumber) -> Schedule {
Schedule::new_homestead()
}
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) {
header.set_difficulty(parent.difficulty().clone());
header.set_gas_limit({
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
});
}
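The gas-limit rule deleted above (handled elsewhere after this refactor) nudges the limit toward the floor target by at most `parent / bound_divisor` per block. A self-contained sketch with `u64` in place of `U256`:

    use std::cmp;

    fn next_gas_limit(parent: u64, floor_target: u64, bound_divisor: u64) -> u64 {
        if parent < floor_target {
            // Move up toward the target, but by no more than parent / bound_divisor.
            cmp::min(floor_target, parent + parent / bound_divisor - 1)
        } else {
            // Move down toward the target, bounded the same way.
            cmp::max(floor_target, parent - parent / bound_divisor + 1)
        }
    }

    // next_gas_limit(4_700_000, 8_000_000, 1024) raises the limit by just under 1/1024.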
fn seals_internally(&self) -> Option<bool> { fn seals_internally(&self) -> Option<bool> {
Some(self.signer.read().is_some()) Some(self.signer.read().is_some())
} }
@ -149,41 +121,11 @@ impl Engine for BasicAuthority {
Seal::None Seal::None
} }
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> {
// check the seal fields.
// TODO: pull this out into common code.
if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: self.seal_fields(), found: header.seal().len() }
)));
}
Ok(()) Ok(())
} }
fn verify_block_unordered(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_external(&self, header: &Header) -> Result<(), Error> {
Ok(())
}
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
// Do not calculate difficulty for genesis blocks.
if header.number() == 0 {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
}
// Check difficulty is correct given the two timestamps.
if header.difficulty() != parent.difficulty() {
return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: *parent.difficulty(), found: *header.difficulty() })))
}
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
Ok(())
}
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
verify_external(header, &*self.validators) verify_external(header, &*self.validators)
} }
@ -192,26 +134,26 @@ impl Engine for BasicAuthority {
} }
#[cfg(not(test))] #[cfg(not(test))]
fn signals_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, _header: &Header, _auxiliary: AuxiliaryData)
-> super::EpochChange -> super::EpochChange<EthereumMachine>
{ {
// don't bother signalling even though a contract might try. // don't bother signalling even though a contract might try.
super::EpochChange::No super::EpochChange::No
} }
#[cfg(test)] #[cfg(test)]
fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, header: &Header, auxiliary: AuxiliaryData)
-> super::EpochChange -> super::EpochChange<EthereumMachine>
{ {
// in test mode, always signal even though they won't be finalized. // in test mode, always signal even though they won't be finalized.
let first = header.number() == 0; let first = header.number() == 0;
self.validators.signals_epoch_end(first, header, block, receipts) self.validators.signals_epoch_end(first, header, auxiliary)
} }
fn is_epoch_end( fn is_epoch_end(
&self, &self,
chain_head: &Header, chain_head: &Header,
_chain: &super::Headers, _chain: &super::Headers<Header>,
_transition_store: &super::PendingTransitionStore, _transition_store: &super::PendingTransitionStore,
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
let first = chain_head.number() == 0; let first = chain_head.number() == 0;
@ -220,10 +162,10 @@ impl Engine for BasicAuthority {
self.validators.is_epoch_end(first, chain_head) self.validators.is_epoch_end(first, chain_head)
} }
fn epoch_verifier<'a>(&self, header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a> { fn epoch_verifier<'a>(&self, header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a, EthereumMachine> {
let first = header.number() == 0; let first = header.number() == 0;
match self.validators.epoch_set(first, self, header.number(), proof) { match self.validators.epoch_set(first, &self.machine, header.number(), proof) {
Ok((list, finalize)) => { Ok((list, finalize)) => {
let verifier = Box::new(EpochVerifier { list: list }); let verifier = Box::new(EpochVerifier { list: list });
@ -260,7 +202,6 @@ mod tests {
use hash::keccak; use hash::keccak;
use bigint::hash::H520; use bigint::hash::H520;
use block::*; use block::*;
use error::{BlockError, Error};
use tests::helpers::*; use tests::helpers::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use header::Header; use header::Header;
@ -270,7 +211,7 @@ mod tests {
/// Create a new test chain spec with `BasicAuthority` consensus engine. /// Create a new test chain spec with `BasicAuthority` consensus engine.
fn new_test_authority() -> Spec { fn new_test_authority() -> Spec {
let bytes: &[u8] = include_bytes!("../../res/basic_authority.json"); let bytes: &[u8] = include_bytes!("../../res/basic_authority.json");
Spec::load(::std::env::temp_dir(), bytes).expect("invalid chain spec") Spec::load(&::std::env::temp_dir(), bytes).expect("invalid chain spec")
} }
#[test] #[test]
@ -287,27 +228,13 @@ mod tests {
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
} }
#[test]
fn can_do_seal_verification_fail() {
let engine = new_test_authority().engine;
let header: Header = Header::default();
let verify_result = engine.verify_block_basic(&header, None);
match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); },
_ => { panic!("Should be error, got Ok"); },
}
}
#[test] #[test]
fn can_do_signature_verification_fail() { fn can_do_signature_verification_fail() {
let engine = new_test_authority().engine; let engine = new_test_authority().engine;
let mut header: Header = Header::default(); let mut header: Header = Header::default();
header.set_seal(vec![::rlp::encode(&H520::default()).into_vec()]); header.set_seal(vec![::rlp::encode(&H520::default()).into_vec()]);
let verify_result = engine.verify_block_family(&header, &Default::default(), None); let verify_result = engine.verify_block_external(&header);
assert!(verify_result.is_err()); assert!(verify_result.is_err());
} }

View File

@ -17,8 +17,6 @@
//! Epoch verifiers and transitions. //! Epoch verifiers and transitions.
use bigint::hash::H256; use bigint::hash::H256;
use error::Error;
use header::Header;
/// A full epoch transition. /// A full epoch transition.
#[derive(Debug, Clone, RlpEncodable, RlpDecodable)] #[derive(Debug, Clone, RlpEncodable, RlpDecodable)]
@ -40,15 +38,13 @@ pub struct PendingTransition {
} }
/// Verifier for all blocks within an epoch with self-contained state. /// Verifier for all blocks within an epoch with self-contained state.
/// pub trait EpochVerifier<M: ::parity_machine::Machine>: Send + Sync {
/// See docs on `Engine` relating to proving functions for more details.
pub trait EpochVerifier: Send + Sync {
/// Lightly verify the next block header. /// Lightly verify the next block header.
/// This may not be a header belonging to a different epoch. /// This may not be a header belonging to a different epoch.
fn verify_light(&self, header: &Header) -> Result<(), Error>; fn verify_light(&self, header: &M::Header) -> Result<(), M::Error>;
/// Perform potentially heavier checks on the next block header. /// Perform potentially heavier checks on the next block header.
fn verify_heavy(&self, header: &Header) -> Result<(), Error> { fn verify_heavy(&self, header: &M::Header) -> Result<(), M::Error> {
self.verify_light(header) self.verify_light(header)
} }
@ -63,6 +59,6 @@ pub trait EpochVerifier: Send + Sync {
/// Special "no-op" verifier for stateless, epoch-less engines. /// Special "no-op" verifier for stateless, epoch-less engines.
pub struct NoOp; pub struct NoOp;
impl EpochVerifier for NoOp { impl<M: ::parity_machine::Machine> EpochVerifier<M> for NoOp {
fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) } fn verify_light(&self, _header: &M::Header) -> Result<(), M::Error> { Ok(()) }
} }

View File

@ -14,51 +14,42 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{BTreeMap, HashMap};
use util::Address;
use builtin::Builtin;
use engines::{Engine, Seal}; use engines::{Engine, Seal};
use spec::CommonParams; use parity_machine::{Machine, Transactions};
use block::{ExecutedBlock, IsBlock};
/// An engine which does not provide any consensus mechanism, just seals blocks internally. /// An engine which does not provide any consensus mechanism, just seals blocks internally.
pub struct InstantSeal { /// Only seals blocks which have transactions.
params: CommonParams, pub struct InstantSeal<M> {
builtins: BTreeMap<Address, Builtin>, machine: M,
} }
impl InstantSeal { impl<M> InstantSeal<M> {
/// Returns new instance of InstantSeal with default VM Factory /// Returns new instance of InstantSeal over the given state machine.
pub fn new(params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Self { pub fn new(machine: M) -> Self {
InstantSeal { InstantSeal {
params: params, machine: machine,
builtins: builtins,
} }
} }
} }
impl Engine for InstantSeal { impl<M: Machine> Engine<M> for InstantSeal<M>
where M::LiveBlock: Transactions
{
fn name(&self) -> &str { fn name(&self) -> &str {
"InstantSeal" "InstantSeal"
} }
fn params(&self) -> &CommonParams { fn machine(&self) -> &M { &self.machine }
&self.params
}
fn additional_params(&self) -> HashMap<String, String> {
hash_map!["registrar".to_owned() => self.params().registrar.hex()]
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
&self.builtins
}
fn seals_internally(&self) -> Option<bool> { Some(true) } fn seals_internally(&self) -> Option<bool> { Some(true) }
fn generate_seal(&self, block: &ExecutedBlock) -> Seal { fn generate_seal(&self, block: &M::LiveBlock) -> Seal {
if block.transactions().is_empty() { Seal::None } else { Seal::Regular(Vec::new()) } if block.transactions().is_empty() { Seal::None } else { Seal::Regular(Vec::new()) }
} }
fn verify_local_seal(&self, _header: &M::Header) -> Result<(), M::Error> {
Ok(())
}
} }
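The only consensus rule left in `InstantSeal` is the one above: seal any block that carries at least one transaction. A self-contained analogue with stand-ins for `Seal` and the machine's live block:

    enum Seal {
        /// A seal was produced; the block can be imported.
        Regular(Vec<Vec<u8>>),
        /// No seal right now.
        None,
    }

    struct LiveBlock { transactions: Vec<Vec<u8>> }

    fn generate_seal(block: &LiveBlock) -> Seal {
        // Empty blocks stay unsealed, so the chain does not grow while idle.
        if block.transactions.is_empty() { Seal::None } else { Seal::Regular(Vec::new()) }
    }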
#[cfg(test)] #[cfg(test)]
@ -91,10 +82,10 @@ mod tests {
let engine = Spec::new_instant().engine; let engine = Spec::new_instant().engine;
let mut header: Header = Header::default(); let mut header: Header = Header::default();
assert!(engine.verify_block_basic(&header, None).is_ok()); assert!(engine.verify_block_basic(&header).is_ok());
header.set_seal(vec![::rlp::encode(&H520::default()).into_vec()]); header.set_seal(vec![::rlp::encode(&H520::default()).into_vec()]);
assert!(engine.verify_block_unordered(&header, None).is_ok()); assert!(engine.verify_block_unordered(&header).is_ok());
} }
} }

View File

@ -42,18 +42,16 @@ use std::fmt;
use self::epoch::PendingTransition; use self::epoch::PendingTransition;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::ExecutedBlock;
use builtin::Builtin; use builtin::Builtin;
use client::EngineClient; use vm::{EnvInfo, Schedule, CreateContractAddress};
use vm::{EnvInfo, LastHashes, Schedule, CreateContractAddress};
use error::Error; use error::Error;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use receipt::Receipt;
use snapshot::SnapshotComponents; use snapshot::SnapshotComponents;
use spec::CommonParams; use spec::CommonParams;
use transaction::{UnverifiedTransaction, SignedTransaction}; use transaction::{UnverifiedTransaction, SignedTransaction};
use ethkey::Signature; use ethkey::Signature;
use parity_machine::{Machine, LocalizedMachine as Localized};
use bigint::prelude::U256; use bigint::prelude::U256;
use bigint::hash::H256; use bigint::hash::H256;
use semantic_version::SemanticVersion; use semantic_version::SemanticVersion;
@ -82,6 +80,8 @@ pub enum EngineError {
InsufficientProof(String), InsufficientProof(String),
/// Failed system call. /// Failed system call.
FailedSystemCall(String), FailedSystemCall(String),
/// Malformed consensus message.
MalformedMessage(String),
/// Requires client ref, but none registered. /// Requires client ref, but none registered.
RequiresClient, RequiresClient,
} }
@ -97,6 +97,7 @@ impl fmt::Display for EngineError {
BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob), BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob),
InsufficientProof(ref msg) => format!("Insufficient validation proof: {}", msg), InsufficientProof(ref msg) => format!("Insufficient validation proof: {}", msg),
FailedSystemCall(ref msg) => format!("Failed to make system call: {}", msg), FailedSystemCall(ref msg) => format!("Failed to make system call: {}", msg),
MalformedMessage(ref msg) => format!("Received malformed consensus message: {}", msg),
RequiresClient => format!("Call requires client but none registered"), RequiresClient => format!("Call requires client but none registered"),
}; };
@ -115,49 +116,46 @@ pub enum Seal {
None, None,
} }
/// Type alias for a function we can make calls through synchronously.
/// Returns the call result and state proof for each call.
pub type Call<'a> = Fn(Address, Bytes) -> Result<(Bytes, Vec<Vec<u8>>), String> + 'a;
/// Type alias for a function we can get headers by hash through. /// Type alias for a function we can get headers by hash through.
pub type Headers<'a> = Fn(H256) -> Option<Header> + 'a; pub type Headers<'a, H> = Fn(H256) -> Option<H> + 'a;
/// Type alias for a function we can query pending transitions by block hash through. /// Type alias for a function we can query pending transitions by block hash through.
pub type PendingTransitionStore<'a> = Fn(H256) -> Option<PendingTransition> + 'a; pub type PendingTransitionStore<'a> = Fn(H256) -> Option<PendingTransition> + 'a;
/// Proof dependent on state. /// Proof dependent on state.
pub trait StateDependentProof: Send + Sync { pub trait StateDependentProof<M: Machine>: Send + Sync {
/// Generate a proof, given the state. /// Generate a proof, given the state.
fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String>; // TODO: make this into an &M::StateContext
fn generate_proof<'a>(&self, state: &<M as Localized<'a>>::StateContext) -> Result<Vec<u8>, String>;
/// Check a proof generated elsewhere (potentially by a peer). /// Check a proof generated elsewhere (potentially by a peer).
// `engine` needed to check state proofs, while really this should // `engine` needed to check state proofs, while really this should
// just be state machine params. // just be state machine params.
fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String>; fn check_proof(&self, machine: &M, proof: &[u8]) -> Result<(), String>;
} }
/// Proof generated on epoch change. /// Proof generated on epoch change.
pub enum Proof { pub enum Proof<M: Machine> {
/// Known proof (extracted from signal) /// Known proof (extracted from signal)
Known(Vec<u8>), Known(Vec<u8>),
/// State dependent proof. /// State dependent proof.
WithState(Arc<StateDependentProof>), WithState(Arc<StateDependentProof<M>>),
} }
/// Generated epoch verifier. /// Generated epoch verifier.
pub enum ConstructedVerifier<'a> { pub enum ConstructedVerifier<'a, M: Machine> {
/// Fully trusted verifier. /// Fully trusted verifier.
Trusted(Box<EpochVerifier>), Trusted(Box<EpochVerifier<M>>),
/// Verifier unconfirmed. Check whether given finality proof finalizes given hash /// Verifier unconfirmed. Check whether given finality proof finalizes given hash
/// under previous epoch. /// under previous epoch.
Unconfirmed(Box<EpochVerifier>, &'a [u8], H256), Unconfirmed(Box<EpochVerifier<M>>, &'a [u8], H256),
/// Error constructing verifier. /// Error constructing verifier.
Err(Error), Err(Error),
} }
impl<'a> ConstructedVerifier<'a> { impl<'a, M: Machine> ConstructedVerifier<'a, M> {
/// Convert to a result, indicating that any necessary confirmation has been done /// Convert to a result, indicating that any necessary confirmation has been done
/// already. /// already.
pub fn known_confirmed(self) -> Result<Box<EpochVerifier>, Error> { pub fn known_confirmed(self) -> Result<Box<EpochVerifier<M>>, Error> {
match self { match self {
ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v), ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v),
ConstructedVerifier::Err(e) => Err(e), ConstructedVerifier::Err(e) => Err(e),
@ -166,84 +164,53 @@ impl<'a> ConstructedVerifier<'a> {
} }
/// Results of a query of whether an epoch change occurred at the given block. /// Results of a query of whether an epoch change occurred at the given block.
pub enum EpochChange { pub enum EpochChange<M: Machine> {
/// Cannot determine until more data is passed. /// Cannot determine until more data is passed.
Unsure(Unsure), Unsure(M::AuxiliaryRequest),
/// No epoch change. /// No epoch change.
No, No,
/// The epoch will change, with proof. /// The epoch will change, with proof.
Yes(Proof), Yes(Proof<M>),
}
/// More data required to determine if an epoch change occurred at a given block.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Unsure {
/// Needs the body.
NeedsBody,
/// Needs the receipts.
NeedsReceipts,
/// Needs both body and receipts.
NeedsBoth,
} }
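A self-contained analogue of consuming the now-generic result: `Unsure` carries whatever auxiliary data the machine asks for, replacing the fixed `NeedsBody`/`NeedsReceipts`/`NeedsBoth` enum removed above (the request and proof types below are illustrative stand-ins):

    enum Proof { Known(Vec<u8>) }

    // Stand-in for the machine's associated auxiliary-request type.
    enum AuxRequest { MoreData }

    enum EpochChange {
        Unsure(AuxRequest),
        No,
        Yes(Proof),
    }

    fn handle(change: EpochChange) -> &'static str {
        match change {
            EpochChange::Yes(Proof::Known(_proof)) => "record a pending epoch transition",
            EpochChange::No => "nothing to do",
            EpochChange::Unsure(AuxRequest::MoreData) => "fetch the requested data and ask again",
        }
    }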
/// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based. /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based.
/// Provides hooks into each of the major parts of block import. /// Provides hooks into each of the major parts of block import.
pub trait Engine : Sync + Send { pub trait Engine<M: Machine>: Sync + Send {
/// The name of this engine. /// The name of this engine.
fn name(&self) -> &str; fn name(&self) -> &str;
/// The version of this engine. Should be of the form /// The version of this engine. Should be of the form
fn version(&self) -> SemanticVersion { SemanticVersion::new(0, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(0, 0, 0) }
/// Get access to the underlying state machine.
// TODO: decouple.
fn machine(&self) -> &M;
/// The number of additional header fields required for this engine. /// The number of additional header fields required for this engine.
fn seal_fields(&self) -> usize { 0 } fn seal_fields(&self) -> usize { 0 }
/// Additional engine-specific information for the user/developer concerning `header`. /// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> BTreeMap<String, String> { BTreeMap::new() } fn extra_info(&self, _header: &M::Header) -> BTreeMap<String, String> { BTreeMap::new() }
/// Additional information. /// Additional information.
fn additional_params(&self) -> HashMap<String, String> { HashMap::new() } fn additional_params(&self) -> HashMap<String, String> { HashMap::new() }
/// Get the general parameters of the chain.
fn params(&self) -> &CommonParams;
/// Get the EVM schedule for the given `block_number`.
fn schedule(&self, block_number: BlockNumber) -> Schedule {
self.params().schedule(block_number)
}
/// Builtin-contracts we would like to see in the chain.
/// (In principle these are just hints for the engine since that has the last word on them.)
fn builtins(&self) -> &BTreeMap<Address, Builtin>;
/// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`.
fn maximum_extra_data_size(&self) -> usize { self.params().maximum_extra_data_size }
/// Maximum number of uncles a block is allowed to declare. /// Maximum number of uncles a block is allowed to declare.
fn maximum_uncle_count(&self) -> usize { 2 } fn maximum_uncle_count(&self) -> usize { 2 }
/// The number of generations back that uncles can be. /// The number of generations back that uncles can be.
fn maximum_uncle_age(&self) -> usize { 6 } fn maximum_uncle_age(&self) -> usize { 6 }
/// The nonce with which accounts begin at given block.
fn account_start_nonce(&self, block: u64) -> U256 {
if block >= self.params().dust_protection_transition {
U256::from(self.params().nonce_cap_increment) * U256::from(block)
} else {
self.params().account_start_nonce
}
}
/// Block transformation functions, before the transactions. /// Block transformation functions, before the transactions.
/// `epoch_begin` set to true if this block kicks off an epoch. /// `epoch_begin` set to true if this block kicks off an epoch.
fn on_new_block( fn on_new_block(
&self, &self,
block: &mut ExecutedBlock, _block: &mut M::LiveBlock,
last_hashes: Arc<LastHashes>,
_epoch_begin: bool, _epoch_begin: bool,
) -> Result<(), Error> { ) -> Result<(), M::Error> {
let parent_hash = block.fields().header.parent_hash().clone(); Ok(())
common::push_last_hash(block, last_hashes, self, &parent_hash)
} }
/// Block transformation functions, after the transactions. /// Block transformation functions, after the transactions.
fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { fn on_close_block(&self, _block: &mut M::LiveBlock) -> Result<(), M::Error> {
Ok(()) Ok(())
} }
@ -251,68 +218,57 @@ pub trait Engine : Sync + Send {
/// Some(true) means the engine is currently prime for seal generation (i.e. node is the current validator). /// Some(true) means the engine is currently prime for seal generation (i.e. node is the current validator).
/// Some(false) means that the node might seal internally but is not qualified now. /// Some(false) means that the node might seal internally but is not qualified now.
fn seals_internally(&self) -> Option<bool> { None } fn seals_internally(&self) -> Option<bool> { None }
/// Attempt to seal the block internally. /// Attempt to seal the block internally.
/// ///
/// If `Some` is returned, then you get a valid seal. /// If `Some` is returned, then you get a valid seal.
/// ///
/// This operation is synchronous and may (quite reasonably) not be available, in which case None will /// This operation is synchronous and may (quite reasonably) not be available, in which case None will
/// be returned. /// be returned.
fn generate_seal(&self, _block: &ExecutedBlock) -> Seal { Seal::None } ///
/// It is fine to require access to state or a full client for this function, since
/// light clients do not generate seals.
fn generate_seal(&self, _block: &M::LiveBlock) -> Seal { Seal::None }
/// Phase 1 quick block verification. Only does checks that are cheap. `block` (the header's full block) /// Verify a locally-generated seal of a header.
/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. ///
fn verify_block_basic(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) } /// If this engine seals internally,
/// no checks have to be done here, since all internally generated seals
/// should be valid.
///
/// Externally-generated seals (e.g. PoW) will need to be checked for validity.
///
/// It is fine to require access to state or a full client for this function, since
/// light clients do not generate seals.
fn verify_local_seal(&self, header: &M::Header) -> Result<(), M::Error>;
/// Phase 2 verification. Perform costly checks such as transaction signatures. `block` (the header's full block) /// Phase 1 quick block verification. Only does checks that are cheap. Returns either a null `Ok` or a general error detailing the problem with import.
/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. fn verify_block_basic(&self, _header: &M::Header) -> Result<(), M::Error> { Ok(()) }
fn verify_block_unordered(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) }
/// Phase 3 verification. Check block information against parent and uncles. `block` (the header's full block) /// Phase 2 verification. Perform costly checks such as transaction signatures. Returns either a null `Ok` or a general error detailing the problem with import.
/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. fn verify_block_unordered(&self, _header: &M::Header) -> Result<(), M::Error> { Ok(()) }
fn verify_block_family(&self, _header: &Header, _parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) }
/// Phase 3 verification. Check block information against parent. Returns either a null `Ok` or a general error detailing the problem with import.
fn verify_block_family(&self, _header: &M::Header, _parent: &M::Header) -> Result<(), Error> { Ok(()) }
/// Phase 4 verification. Verify block header against potentially external data. /// Phase 4 verification. Verify block header against potentially external data.
fn verify_block_external(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) } /// Should only be called when `register_client` has been called previously.
fn verify_block_external(&self, _header: &M::Header) -> Result<(), Error> { Ok(()) }
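Editorial note: the verify_* hooks above are intended to run as a pipeline of increasingly expensive checks. A small illustration of that phased pattern with stand-in functions (only the phase names come from the comments above; everything else is invented for the example):

    #[derive(Debug)]
    struct Header { number: u64 }
    #[derive(Debug)]
    struct VerifyError(&'static str);

    // Phase 1: cheap structural checks only.
    fn verify_basic(h: &Header) -> Result<(), VerifyError> {
        if h.number == u64::MAX { Err(VerifyError("ridiculous number")) } else { Ok(()) }
    }
    // Phase 2: costly checks that need no parent (signatures, PoW, ...).
    fn verify_unordered(_h: &Header) -> Result<(), VerifyError> { Ok(()) }
    // Phase 3: checks against the parent header.
    fn verify_family(h: &Header, parent: &Header) -> Result<(), VerifyError> {
        if h.number == parent.number + 1 { Ok(()) } else { Err(VerifyError("wrong number")) }
    }
    // Phase 4: checks against external data, once a client is registered.
    fn verify_external(_h: &Header) -> Result<(), VerifyError> { Ok(()) }

    // Run the cheapest phase first so obviously bad blocks are rejected early.
    fn verify_all(h: &Header, parent: &Header) -> Result<(), VerifyError> {
        verify_basic(h)
            .and_then(|_| verify_unordered(h))
            .and_then(|_| verify_family(h, parent))
            .and_then(|_| verify_external(h))
    }

    fn main() {
        let parent = Header { number: 41 };
        let child = Header { number: 42 };
        assert!(verify_all(&child, &parent).is_ok());
    }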
/// Additional verification for transactions in blocks.
// TODO: Add flags for which bits of the transaction to check.
// TODO: consider including State in the params.
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, _header: &Header) -> Result<(), Error> {
t.verify_basic(true, Some(self.params().chain_id), true)?;
Ok(())
}
/// Verify a particular transaction is valid.
fn verify_transaction(&self, t: UnverifiedTransaction, _header: &Header) -> Result<SignedTransaction, Error> {
SignedTransaction::new(t)
}
/// The network ID that transactions should be signed with.
fn signing_chain_id(&self, _env_info: &EnvInfo) -> Option<u64> {
Some(self.params().chain_id)
}
/// Verify the seal of a block. This is an auxilliary method that actually just calls other `verify_` methods
/// to get the job done. By default it must pass `verify_basic` and `verify_block_unordered`. If more or fewer
/// methods are needed for an Engine, this may be overridden.
fn verify_block_seal(&self, header: &Header) -> Result<(), Error> {
self.verify_block_basic(header, None).and_then(|_| self.verify_block_unordered(header, None))
}
/// Genesis epoch data. /// Genesis epoch data.
fn genesis_epoch_data(&self, _header: &Header, _call: &Call) -> Result<Vec<u8>, String> { Ok(Vec::new()) } fn genesis_epoch_data<'a>(&self, _header: &M::Header, _state: &<M as Localized<'a>>::StateContext) -> Result<Vec<u8>, String> { Ok(Vec::new()) }
/// Whether an epoch change is signalled at the given header but will require finality. /// Whether an epoch change is signalled at the given header but will require finality.
/// If a change can be enacted immediately then return `No` from this function but /// If a change can be enacted immediately then return `No` from this function but
/// `Yes` from `is_epoch_end`. /// `Yes` from `is_epoch_end`.
/// ///
/// If the block or receipts are required, return `Unsure` and the function will be /// If auxiliary data of the block is required, return an auxiliary request and the function will be
/// called again with them. /// called again with them.
/// Return `Yes` or `No` when the answer is definitively known. /// Return `Yes` or `No` when the answer is definitively known.
/// ///
/// Should not interact with state. /// Should not interact with state.
fn signals_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[Receipt]>) fn signals_epoch_end<'a>(&self, _header: &M::Header, _aux: <M as Localized<'a>>::AuxiliaryData)
-> EpochChange -> EpochChange<M>
{ {
EpochChange::No EpochChange::No
} }
@ -326,8 +282,8 @@ pub trait Engine : Sync + Send {
/// Return optional transition proof. /// Return optional transition proof.
fn is_epoch_end( fn is_epoch_end(
&self, &self,
_chain_head: &Header, _chain_head: &M::Header,
_chain: &Headers, _chain: &Headers<M::Header>,
_transition_store: &PendingTransitionStore, _transition_store: &PendingTransitionStore,
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
None None
@ -335,35 +291,21 @@ pub trait Engine : Sync + Send {
/// Create an epoch verifier from validation proof and a flag indicating /// Create an epoch verifier from validation proof and a flag indicating
/// whether finality is required. /// whether finality is required.
fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ConstructedVerifier<'a> { fn epoch_verifier<'a>(&self, _header: &M::Header, _proof: &'a [u8]) -> ConstructedVerifier<'a, M> {
ConstructedVerifier::Trusted(Box::new(self::epoch::NoOp)) ConstructedVerifier::Trusted(Box::new(self::epoch::NoOp))
} }
/// Populate a header's fields based on its parent's header. /// Populate a header's fields based on its parent's header.
/// Usually implements the chain scoring rule based on weight. /// Usually implements the chain scoring rule based on weight.
/// The gas floor target must not be lower than the engine's minimum gas limit. fn populate_from_parent(&self, _header: &mut M::Header, _parent: &M::Header) { }
fn populate_from_parent(&self, header: &mut Header, parent: &Header, _gas_floor_target: U256, _gas_ceil_target: U256) {
header.set_difficulty(parent.difficulty().clone());
header.set_gas_limit(parent.gas_limit().clone());
}
/// Handle any potential consensus messages; /// Handle any potential consensus messages;
/// updating consensus state and potentially issuing a new one. /// updating consensus state and potentially issuing a new one.
fn handle_message(&self, _message: &[u8]) -> Result<(), Error> { Err(EngineError::UnexpectedMessage.into()) } fn handle_message(&self, _message: &[u8]) -> Result<(), EngineError> { Err(EngineError::UnexpectedMessage) }
/// Attempt to get a handle to a built-in contract.
/// Only returns references to activated built-ins.
// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
// from Spec into here and removing the Spec::builtins field.
fn builtin(&self, a: &Address, block_number: ::header::BlockNumber) -> Option<&Builtin> {
self.builtins()
.get(a)
.and_then(|b| if b.is_active(block_number) { Some(b) } else { None })
}
/// Find out if the block is a proposal block and should not be inserted into the DB. /// Find out if the block is a proposal block and should not be inserted into the DB.
/// Takes a header of a fully verified block. /// Takes a header of a fully verified block.
fn is_proposal(&self, _verified_header: &Header) -> bool { false } fn is_proposal(&self, _verified_header: &M::Header) -> bool { false }
/// Register an account which signs consensus messages. /// Register an account which signs consensus messages.
fn set_signer(&self, _account_provider: Arc<AccountProvider>, _address: Address, _password: String) {} fn set_signer(&self, _account_provider: Arc<AccountProvider>, _address: Address, _password: String) {}
@ -371,8 +313,8 @@ pub trait Engine : Sync + Send {
/// Sign using the EngineSigner, to be used for consensus tx signing. /// Sign using the EngineSigner, to be used for consensus tx signing.
fn sign(&self, _hash: H256) -> Result<Signature, Error> { unimplemented!() } fn sign(&self, _hash: H256) -> Result<Signature, Error> { unimplemented!() }
/// Add Client which can be used for sealing, querying the state and sending messages. /// Add Client which can be used for sealing, potentially querying the state and sending messages.
fn register_client(&self, _client: Weak<EngineClient>) {} fn register_client(&self, _client: Weak<M::EngineClient>) {}
/// Trigger next step of the consensus engine. /// Trigger next step of the consensus engine.
fn step(&self) {} fn step(&self) {}
@ -390,118 +332,96 @@ pub trait Engine : Sync + Send {
fn supports_warp(&self) -> bool { fn supports_warp(&self) -> bool {
self.snapshot_components().is_some() self.snapshot_components().is_some()
} }
}
/// If this engine supports wasm contracts. /// Common type alias for an engine coupled with an Ethereum-like state machine.
fn supports_wasm(&self) -> bool { // TODO: make this a _trait_ alias when those exist.
self.params().wasm // fortunately the effect is largely the same since engines are mostly used
// via trait objects.
pub trait EthEngine: Engine<::machine::EthereumMachine> {
/// Get the general parameters of the chain.
fn params(&self) -> &CommonParams {
self.machine().params()
}
/// Get the EVM schedule for the given block number.
fn schedule(&self, block_number: BlockNumber) -> Schedule {
self.machine().schedule(block_number)
}
/// Builtin-contracts for the chain.
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
self.machine().builtins()
}
/// Attempt to get a handle to a built-in contract.
/// Only returns references to activated built-ins.
fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> {
self.machine().builtin(a, block_number)
}
/// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`.
fn maximum_extra_data_size(&self) -> usize {
self.machine().maximum_extra_data_size()
}
/// The nonce with which accounts begin at given block.
fn account_start_nonce(&self, block: u64) -> U256 {
self.machine().account_start_nonce(block)
}
/// The network ID that transactions should be signed with.
fn signing_chain_id(&self, env_info: &EnvInfo) -> Option<u64> {
self.machine().signing_chain_id(env_info)
} }
/// Returns new contract address generation scheme at given block number. /// Returns new contract address generation scheme at given block number.
fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress { fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress {
if number >= self.params().eip86_transition { self.machine().create_address_scheme(number)
CreateContractAddress::FromCodeHash
} else {
CreateContractAddress::FromSenderAndNonce
} }
/// Verify a particular transaction is valid.
fn verify_transaction_unordered(&self, t: UnverifiedTransaction, header: &Header) -> Result<SignedTransaction, Error> {
self.machine().verify_transaction_unordered(t, header)
}
/// Additional verification for transactions in blocks.
// TODO: Add flags for which bits of the transaction to check.
// TODO: consider including State in the params.
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
self.machine().verify_transaction_basic(t, header)
}
/// If this machine supports wasm.
fn supports_wasm(&self) -> bool {
self.machine().supports_wasm()
} }
} }
// convenience wrappers for existing functions.
impl<T> EthEngine for T where T: Engine<::machine::EthereumMachine> { }
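Editorial note: `EthEngine` above is effectively a trait alias; the blanket impl makes every `Engine<EthereumMachine>` an `EthEngine` automatically, so call sites keep a short bound while still working through trait objects. The general pattern, reduced to a self-contained sketch with toy traits (the names below are illustrative, not Parity's real definitions):

    // A generic trait parameterised over some machine type.
    trait Engine<M>: Sync + Send {
        fn name(&self) -> &str;
    }

    // The one concrete machine this crate cares about.
    struct EthereumMachine;

    // "Trait alias": anything implementing Engine<EthereumMachine> is an EthEngine.
    trait EthEngine: Engine<EthereumMachine> {
        // Convenience defaults can live here and delegate to the generic trait.
        fn describe(&self) -> String { format!("engine: {}", self.name()) }
    }
    impl<T> EthEngine for T where T: Engine<EthereumMachine> {}

    struct NullEngine;
    impl Engine<EthereumMachine> for NullEngine {
        fn name(&self) -> &str { "NullEngine" }
    }

    fn main() {
        // Callers can hold the specialised trait object without extra boilerplate.
        let engine: Box<dyn EthEngine> = Box::new(NullEngine);
        assert_eq!(engine.describe(), "engine: NullEngine");
    }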
/// Common engine utilities /// Common engine utilities
pub mod common { pub mod common {
use std::sync::Arc;
use block::ExecutedBlock; use block::ExecutedBlock;
use error::Error; use error::Error;
use transaction::SYSTEM_ADDRESS; use trace::{Tracer, ExecutiveTracer, RewardType};
use executive::Executive;
use vm::{CallType, ActionParams, ActionValue, EnvInfo, LastHashes};
use trace::{NoopTracer, NoopVMTracer, Tracer, ExecutiveTracer, RewardType};
use state::Substate;
use state::CleanupMode; use state::CleanupMode;
use bigint::prelude::U256; use bigint::prelude::U256;
use bigint::hash::H256;
use util::*;
use bytes::{Bytes, BytesRef};
use super::Engine;
/// Execute a call as the system address. /// Give reward and trace.
pub fn execute_as_system<E: Engine + ?Sized>( pub fn bestow_block_reward(block: &mut ExecutedBlock, reward: U256) -> Result<(), Error> {
block: &mut ExecutedBlock,
last_hashes: Arc<LastHashes>,
engine: &E,
contract_address: Address,
gas: U256,
data: Option<Bytes>,
) -> Result<Bytes, Error> {
let env_info = {
let header = block.fields().header;
EnvInfo {
number: header.number(),
author: header.author().clone(),
timestamp: header.timestamp(),
difficulty: header.difficulty().clone(),
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: gas,
}
};
let mut state = block.fields_mut().state;
let params = ActionParams {
code_address: contract_address.clone(),
address: contract_address.clone(),
sender: SYSTEM_ADDRESS.clone(),
origin: SYSTEM_ADDRESS.clone(),
gas: gas,
gas_price: 0.into(),
value: ActionValue::Transfer(0.into()),
code: state.code(&contract_address)?,
code_hash: Some(state.code_hash(&contract_address)?),
data: data,
call_type: CallType::Call,
};
let mut ex = Executive::new(&mut state, &env_info, engine);
let mut substate = Substate::new();
let mut output = Vec::new();
if let Err(e) = ex.call(params, &mut substate, BytesRef::Flexible(&mut output), &mut NoopTracer, &mut NoopVMTracer) {
warn!("Encountered error on making system call: {}", e);
}
Ok(output)
}
/// Push last known block hash to the state.
pub fn push_last_hash<E: Engine + ?Sized>(block: &mut ExecutedBlock, last_hashes: Arc<LastHashes>, engine: &E, hash: &H256) -> Result<(), Error> {
if block.fields().header.number() == engine.params().eip210_transition {
let state = block.fields_mut().state;
state.init_code(&engine.params().eip210_contract_address, engine.params().eip210_contract_code.clone())?;
}
if block.fields().header.number() >= engine.params().eip210_transition {
let _ = execute_as_system(
block,
last_hashes,
engine,
engine.params().eip210_contract_address,
engine.params().eip210_contract_gas,
Some(hash.to_vec()),
)?;
}
Ok(())
}
/// Trace rewards on closing block
pub fn bestow_block_reward<E: Engine + ?Sized>(block: &mut ExecutedBlock, engine: &E) -> Result<(), Error> {
let fields = block.fields_mut(); let fields = block.fields_mut();
// Bestow block reward // Bestow block reward
let reward = engine.params().block_reward;
let res = fields.state.add_balance(fields.header.author(), &reward, CleanupMode::NoEmpty) let res = fields.state.add_balance(fields.header.author(), &reward, CleanupMode::NoEmpty)
.map_err(::error::Error::from) .map_err(::error::Error::from)
.and_then(|_| fields.state.commit()); .and_then(|_| fields.state.commit());
let block_author = fields.header.author().clone(); let block_author = fields.header.author().clone();
fields.traces.as_mut().map(|mut traces| { fields.traces.as_mut().map(move |mut traces| {
let mut tracer = ExecutiveTracer::default(); let mut tracer = ExecutiveTracer::default();
tracer.trace_reward(block_author, engine.params().block_reward, RewardType::Block); tracer.trace_reward(block_author, reward, RewardType::Block);
traces.push(tracer.drain()) traces.push(tracer.drain())
}); });
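Editorial note: the helper above applies the reward to the author's balance, commits, and only then records a reward trace when tracing is enabled. A stripped-down sketch of that ordering with toy types (no real state, tracer or U256 here; all names are stand-ins):

    use std::collections::HashMap;

    type Address = &'static str;

    #[derive(Default)]
    struct State { balances: HashMap<Address, u64>, committed: bool }

    impl State {
        fn add_balance(&mut self, who: Address, amount: u64) {
            *self.balances.entry(who).or_insert(0) += amount;
        }
        fn commit(&mut self) { self.committed = true; }
    }

    // Apply the reward, commit, and only then record a trace if tracing is on.
    fn bestow_block_reward(
        state: &mut State,
        traces: &mut Option<Vec<String>>,
        author: Address,
        reward: u64,
    ) {
        state.add_balance(author, reward);
        state.commit();
        // Tracing is optional: `None` means the block imports without traces.
        if let Some(traces) = traces.as_mut() {
            traces.push(format!("block reward {} -> {}", reward, author));
        }
    }

    fn main() {
        let mut state = State::default();
        let mut traces = Some(Vec::new());
        bestow_block_reward(&mut state, &mut traces, "author", 5);
        assert_eq!(state.balances["author"], 5);
        assert_eq!(traces.unwrap().len(), 1);
    }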


@ -14,103 +14,92 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeMap;
use util::Address;
use builtin::Builtin;
use block::{ExecutedBlock, IsBlock};
use bigint::prelude::U256; use bigint::prelude::U256;
use engines::Engine; use engines::Engine;
use spec::CommonParams; use parity_machine::{Header, LiveBlock, WithBalances};
use evm::Schedule;
use header::BlockNumber; /// Params for a null engine.
use error::Error; #[derive(Clone, Default)]
use state::CleanupMode; pub struct NullEngineParams {
use trace::{Tracer, ExecutiveTracer, RewardType}; /// base reward for a block.
pub block_reward: U256,
}
impl From<::ethjson::spec::NullEngineParams> for NullEngineParams {
fn from(p: ::ethjson::spec::NullEngineParams) -> Self {
NullEngineParams {
block_reward: p.block_reward.map_or_else(Default::default, Into::into),
}
}
}
/// An engine which does not provide any consensus mechanism and does not seal blocks. /// An engine which does not provide any consensus mechanism and does not seal blocks.
pub struct NullEngine { pub struct NullEngine<M> {
params: CommonParams, params: NullEngineParams,
builtins: BTreeMap<Address, Builtin>, machine: M,
} }
impl NullEngine { impl<M> NullEngine<M> {
/// Returns new instance of NullEngine with default VM Factory /// Returns new instance of NullEngine with default VM Factory
pub fn new(params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Self { pub fn new(params: NullEngineParams, machine: M) -> Self {
NullEngine{ NullEngine {
params: params, params: params,
builtins: builtins, machine: machine,
} }
} }
} }
impl Default for NullEngine { impl<M: Default> Default for NullEngine<M> {
fn default() -> Self { fn default() -> Self {
Self::new(Default::default(), Default::default()) Self::new(Default::default(), Default::default())
} }
} }
impl Engine for NullEngine { impl<M: WithBalances> Engine<M> for NullEngine<M> {
fn name(&self) -> &str { fn name(&self) -> &str {
"NullEngine" "NullEngine"
} }
fn params(&self) -> &CommonParams { fn machine(&self) -> &M { &self.machine }
&self.params
fn on_close_block(&self, block: &mut M::LiveBlock) -> Result<(), M::Error> {
use std::ops::Shr;
let author = *LiveBlock::header(&*block).author();
let number = LiveBlock::header(&*block).number();
let reward = self.params.block_reward;
if reward == U256::zero() { return Ok(()) }
let n_uncles = LiveBlock::uncles(&*block).len();
// Bestow block reward
let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles);
let mut uncle_rewards = Vec::with_capacity(n_uncles);
self.machine.add_balance(block, &author, &result_block_reward)?;
// bestow uncle rewards.
for u in LiveBlock::uncles(&*block) {
let uncle_author = u.author();
let result_uncle_reward = (reward * U256::from(8 + u.number() - number)).shr(3);
uncle_rewards.push((*uncle_author, result_uncle_reward));
} }
fn builtins(&self) -> &BTreeMap<Address, Builtin> { for &(ref a, ref reward) in &uncle_rewards {
&self.builtins self.machine.add_balance(block, a, reward)?;
} }
fn schedule(&self, _block_number: BlockNumber) -> Schedule { // note and trace.
Schedule::new_homestead() self.machine.note_rewards(block, &[(author, result_block_reward)], &uncle_rewards)
}
fn verify_local_seal(&self, _header: &M::Header) -> Result<(), M::Error> {
Ok(())
} }
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> { fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000))) Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000)))
} }
fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
if self.params.block_reward == U256::zero() {
// we don't have to apply reward in this case
return Ok(())
}
/// Block reward
let tracing_enabled = block.tracing_enabled();
let fields = block.fields_mut();
let mut tracer = ExecutiveTracer::default();
let result_block_reward = U256::from(1000000000);
fields.state.add_balance(
fields.header.author(),
&result_block_reward,
CleanupMode::NoEmpty
)?;
if tracing_enabled {
let block_author = fields.header.author().clone();
tracer.trace_reward(block_author, result_block_reward, RewardType::Block);
}
/// Uncle rewards
let result_uncle_reward = U256::from(10000000);
for u in fields.uncles.iter() {
let uncle_author = u.author().clone();
fields.state.add_balance(
u.author(),
&(result_uncle_reward),
CleanupMode::NoEmpty
)?;
if tracing_enabled {
tracer.trace_reward(uncle_author, result_uncle_reward, RewardType::Uncle);
}
}
fields.state.commit()?;
if tracing_enabled {
fields.traces.as_mut().map(|mut traces| traces.push(tracer.drain()));
}
Ok(())
}
} }
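Editorial note: the new `NullEngine::on_close_block` uses the familiar Ethash-style schedule: the author receives the base reward plus 1/32 of it per included uncle, and each uncle author receives `reward * (8 + uncle_number - block_number) / 8`. A tiny arithmetic sketch of those formulas using plain `u128` instead of `U256` (the concrete numbers are made up):

    // Author reward: base reward plus reward/32 for every uncle included.
    fn block_reward(base: u128, n_uncles: u128) -> u128 {
        base + (base >> 5) * n_uncles
    }

    // Uncle reward: shrinks the further the uncle lags behind the including
    // block (at most 7 generations, so the factor runs from 7/8 down to 1/8).
    fn uncle_reward(base: u128, block_number: u128, uncle_number: u128) -> u128 {
        (base * (8 + uncle_number - block_number)) >> 3
    }

    fn main() {
        let base = 5_000_000_000_000_000_000u128; // e.g. 5 ETH in wei
        // Block 100 including one uncle mined at height 98.
        assert_eq!(block_reward(base, 1), base + base / 32);
        assert_eq!(uncle_reward(base, 100, 98), base * 6 / 8);
    }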


@ -27,9 +27,8 @@ mod params;
use std::sync::{Weak, Arc}; use std::sync::{Weak, Arc};
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use std::collections::{HashSet, BTreeMap, HashMap}; use std::collections::{HashSet, BTreeMap};
use hash::keccak; use hash::keccak;
use std::cmp;
use bigint::prelude::{U128, U256}; use bigint::prelude::{U128, U256};
use bigint::hash::{H256, H520}; use bigint::hash::{H256, H520};
use parking_lot::RwLock; use parking_lot::RwLock;
@ -39,12 +38,10 @@ use client::EngineClient;
use bytes::Bytes; use bytes::Bytes;
use error::{Error, BlockError}; use error::{Error, BlockError};
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use builtin::Builtin;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use ethkey::{Message, public_to_address, recover, Signature}; use ethkey::{Message, public_to_address, recover, Signature};
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use spec::CommonParams;
use engines::{Engine, Seal, EngineError, ConstructedVerifier}; use engines::{Engine, Seal, EngineError, ConstructedVerifier};
use io::IoService; use io::IoService;
use super::signer::EngineSigner; use super::signer::EngineSigner;
@ -54,6 +51,7 @@ use super::vote_collector::VoteCollector;
use self::message::*; use self::message::*;
use self::params::TendermintParams; use self::params::TendermintParams;
use semantic_version::SemanticVersion; use semantic_version::SemanticVersion;
use machine::{AuxiliaryData, EthereumMachine};
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum Step { pub enum Step {
@ -78,8 +76,6 @@ pub type BlockHash = H256;
/// Engine using `Tendermint` consensus algorithm, suitable for EVM chain. /// Engine using `Tendermint` consensus algorithm, suitable for EVM chain.
pub struct Tendermint { pub struct Tendermint {
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
step_service: IoService<Step>, step_service: IoService<Step>,
client: RwLock<Option<Weak<EngineClient>>>, client: RwLock<Option<Weak<EngineClient>>>,
/// Blockchain height. /// Blockchain height.
@ -104,6 +100,10 @@ pub struct Tendermint {
last_proposed: RwLock<H256>, last_proposed: RwLock<H256>,
/// Set used to determine the current validators. /// Set used to determine the current validators.
validators: Box<ValidatorSet>, validators: Box<ValidatorSet>,
/// Reward per block, in base units.
block_reward: U256,
/// ethereum machine descriptor
machine: EthereumMachine,
} }
struct EpochVerifier<F> struct EpochVerifier<F>
@ -113,7 +113,7 @@ struct EpochVerifier<F>
recover: F recover: F
} }
impl <F> super::EpochVerifier for EpochVerifier<F> impl <F> super::EpochVerifier<EthereumMachine> for EpochVerifier<F>
where F: Fn(&Signature, &Message) -> Result<Address, Error> + Send + Sync where F: Fn(&Signature, &Message) -> Result<Address, Error> + Send + Sync
{ {
fn verify_light(&self, header: &Header) -> Result<(), Error> { fn verify_light(&self, header: &Header) -> Result<(), Error> {
@ -167,11 +167,9 @@ fn destructure_proofs(combined: &[u8]) -> Result<(BlockNumber, &[u8], &[u8]), Er
impl Tendermint { impl Tendermint {
/// Create a new instance of Tendermint engine /// Create a new instance of Tendermint engine
pub fn new(params: CommonParams, our_params: TendermintParams, builtins: BTreeMap<Address, Builtin>) -> Result<Arc<Self>, Error> { pub fn new(our_params: TendermintParams, machine: EthereumMachine) -> Result<Arc<Self>, Error> {
let engine = Arc::new( let engine = Arc::new(
Tendermint { Tendermint {
params: params,
builtins: builtins,
client: RwLock::new(None), client: RwLock::new(None),
step_service: IoService::<Step>::start()?, step_service: IoService::<Step>::start()?,
height: AtomicUsize::new(1), height: AtomicUsize::new(1),
@ -185,9 +183,13 @@ impl Tendermint {
proposal_parent: Default::default(), proposal_parent: Default::default(),
last_proposed: Default::default(), last_proposed: Default::default(),
validators: our_params.validators, validators: our_params.validators,
block_reward: our_params.block_reward,
machine: machine,
}); });
let handler = TransitionHandler::new(Arc::downgrade(&engine) as Weak<Engine>, Box::new(our_params.timeouts));
let handler = TransitionHandler::new(Arc::downgrade(&engine) as Weak<Engine<_>>, Box::new(our_params.timeouts));
engine.step_service.register_handler(Arc::new(handler))?; engine.step_service.register_handler(Arc::new(handler))?;
Ok(engine) Ok(engine)
} }
@ -438,7 +440,7 @@ impl Tendermint {
} }
} }
impl Engine for Tendermint { impl Engine<EthereumMachine> for Tendermint {
fn name(&self) -> &str { "Tendermint" } fn name(&self) -> &str { "Tendermint" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
@ -446,13 +448,7 @@ impl Engine for Tendermint {
/// (consensus view, proposal signature, authority signatures) /// (consensus view, proposal signature, authority signatures)
fn seal_fields(&self) -> usize { 3 } fn seal_fields(&self) -> usize { 3 }
fn params(&self) -> &CommonParams { &self.params } fn machine(&self) -> &EthereumMachine { &self.machine }
fn additional_params(&self) -> HashMap<String, String> {
hash_map!["registrar".to_owned() => self.params().registrar.hex()]
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins }
fn maximum_uncle_count(&self) -> usize { 0 } fn maximum_uncle_count(&self) -> usize { 0 }
@ -469,19 +465,13 @@ impl Engine for Tendermint {
] ]
} }
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
// Chain scoring: total weight is sqrt(U256::max_value())*height - view // Chain scoring: total weight is sqrt(U256::max_value())*height - view
let new_difficulty = U256::from(U128::max_value()) + consensus_view(parent).expect("Header has been verified; qed").into() - self.view.load(AtomicOrdering::SeqCst).into(); let new_difficulty = U256::from(U128::max_value())
+ consensus_view(parent).expect("Header has been verified; qed").into()
- self.view.load(AtomicOrdering::SeqCst).into();
header.set_difficulty(new_difficulty); header.set_difficulty(new_difficulty);
header.set_gas_limit({
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
});
} }
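Editorial note: chain scoring in `populate_from_parent` now only sets the difficulty, roughly `U128::max_value() + parent_view - current_view`, so each extra view (round change) at a given height lowers that block's score while total difficulty still grows by a large constant per block. A toy numeric sketch of the comparison, using plain integers rather than `U256`:

    // Per-block score, following the shape of the rule above: a large constant,
    // plus the parent's view, minus the view this block was sealed in.
    fn block_score(parent_view: u128, view: u128) -> u128 {
        const BASE: u128 = u64::MAX as u128; // stand-in for U128::max_value()
        BASE + parent_view - view
    }

    fn main() {
        // At the same height, a block sealed after more round changes (a higher
        // view) scores lower, so chains with fewer view changes win on weight.
        assert!(block_score(0, 0) > block_score(0, 3));
    }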
/// Should this node participate. /// Should this node participate.
@ -525,19 +515,27 @@ impl Engine for Tendermint {
} }
} }
fn handle_message(&self, rlp: &[u8]) -> Result<(), Error> { fn handle_message(&self, rlp: &[u8]) -> Result<(), EngineError> {
fn fmt_err<T: ::std::fmt::Debug>(x: T) -> EngineError {
EngineError::MalformedMessage(format!("{:?}", x))
}
let rlp = UntrustedRlp::new(rlp); let rlp = UntrustedRlp::new(rlp);
let message: ConsensusMessage = rlp.as_val()?; let message: ConsensusMessage = rlp.as_val().map_err(fmt_err)?;
if !self.votes.is_old_or_known(&message) { if !self.votes.is_old_or_known(&message) {
let sender = public_to_address(&recover(&message.signature.into(), &keccak(rlp.at(1)?.as_raw()))?); let msg_hash = keccak(rlp.at(1).map_err(fmt_err)?.as_raw());
let sender = public_to_address(
&recover(&message.signature.into(), &msg_hash).map_err(fmt_err)?
);
if !self.is_authority(&sender) { if !self.is_authority(&sender) {
return Err(EngineError::NotAuthorized(sender).into()); return Err(EngineError::NotAuthorized(sender));
} }
self.broadcast_message(rlp.as_raw().to_vec()); self.broadcast_message(rlp.as_raw().to_vec());
if let Some(double) = self.votes.vote(message.clone(), &sender) { if let Some(double) = self.votes.vote(message.clone(), &sender) {
let height = message.vote_step.height as BlockNumber; let height = message.vote_step.height as BlockNumber;
self.validators.report_malicious(&sender, height, height, ::rlp::encode(&double).into_vec()); self.validators.report_malicious(&sender, height, height, ::rlp::encode(&double).into_vec());
return Err(EngineError::DoubleVote(sender).into()); return Err(EngineError::DoubleVote(sender));
} }
trace!(target: "engine", "Handling a valid {:?} from {}.", message, sender); trace!(target: "engine", "Handling a valid {:?} from {}.", message, sender);
self.handle_valid_message(&message); self.handle_valid_message(&message);
@ -545,12 +543,37 @@ impl Engine for Tendermint {
Ok(()) Ok(())
} }
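Editorial note: `handle_message` now returns `EngineError` directly, so each fallible step inside it (RLP decoding, signature recovery) is mapped into `EngineError::MalformedMessage` through a small local helper that is generic over anything `Debug`. The same trick in isolation, with stand-in error and decode types:

    #[derive(Debug, PartialEq)]
    enum EngineError {
        MalformedMessage(String),
    }

    // Local helper: turn any debuggable error into a MalformedMessage.
    fn fmt_err<T: std::fmt::Debug>(x: T) -> EngineError {
        EngineError::MalformedMessage(format!("{:?}", x))
    }

    // A stand-in for a decoding step that fails with its own error type.
    fn decode(raw: &[u8]) -> Result<u32, std::num::ParseIntError> {
        std::str::from_utf8(raw).unwrap_or("x").parse()
    }

    fn handle_message(raw: &[u8]) -> Result<u32, EngineError> {
        // `?` works because the foreign error is converted explicitly via map_err.
        let value = decode(raw).map_err(fmt_err)?;
        Ok(value)
    }

    fn main() {
        assert_eq!(handle_message(b"42"), Ok(42));
        assert!(handle_message(b"not a number").is_err());
    }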
/// Apply the block reward on finalisation of the block. fn on_new_block(&self, block: &mut ExecutedBlock, epoch_begin: bool) -> Result<(), Error> {
fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error>{ if !epoch_begin { return Ok(()) }
::engines::common::bestow_block_reward(block, self)
// genesis is never a new block, but might as well check.
let header = block.fields().header.clone();
let first = header.number() == 0;
let mut call = |to, data| {
let result = self.machine.execute_as_system(
block,
to,
U256::max_value(), // unbounded gas? maybe make configurable.
Some(data),
);
result.map_err(|e| format!("{}", e))
};
self.validators.on_epoch_begin(first, &header, &mut call)
} }
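Editorial note: `on_new_block` now builds a closure over the live block and hands it to the validator set, so the set can make contract calls at epoch start without knowing anything about blocks or state. A reduced sketch of that "system call as closure" shape (the `Address`/`Bytes` aliases and the echoing "contract" below are stand-ins, not the real machine API):

    type Address = u64;
    type Bytes = Vec<u8>;

    // The consumer only ever sees a callable: address + input -> output or error.
    fn on_epoch_begin(call: &mut dyn FnMut(Address, Bytes) -> Result<Bytes, String>) -> Result<(), String> {
        let out = call(0xAA, vec![1, 2, 3])?;
        println!("validator contract returned {} bytes", out.len());
        Ok(())
    }

    struct Block { number: u64 }

    fn main() {
        let mut block = Block { number: 7 };
        // The closure borrows the block; callees never touch it directly.
        let mut call = |_to: Address, data: Bytes| -> Result<Bytes, String> {
            block.number += 1; // pretend the call executed and touched state
            Ok(data)
        };
        on_epoch_begin(&mut call).unwrap();
    }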
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { /// Apply the block reward on finalisation of the block.
fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error>{
::engines::common::bestow_block_reward(block, self.block_reward)
}
fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> {
Ok(())
}
fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
let seal_length = header.seal().len(); let seal_length = header.seal().len();
if seal_length == self.seal_fields() { if seal_length == self.seal_fields() {
// Either proposal or commit. // Either proposal or commit.
@ -568,28 +591,7 @@ impl Engine for Tendermint {
} }
} }
fn verify_block_unordered(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_external(&self, header: &Header) -> Result<(), Error> {
Ok(())
}
/// Verify gas limit.
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
if header.number() == 0 {
return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into());
}
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
}
Ok(())
}
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
if let Ok(proposal) = ConsensusMessage::new_proposal(header) { if let Ok(proposal) = ConsensusMessage::new_proposal(header) {
let proposer = proposal.verify()?; let proposer = proposal.verify()?;
if !self.is_authority(&proposer) { if !self.is_authority(&proposer) {
@ -630,17 +632,17 @@ impl Engine for Tendermint {
} }
} }
fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, header: &Header, aux: AuxiliaryData)
-> super::EpochChange -> super::EpochChange<EthereumMachine>
{ {
let first = header.number() == 0; let first = header.number() == 0;
self.validators.signals_epoch_end(first, header, block, receipts) self.validators.signals_epoch_end(first, header, aux)
} }
fn is_epoch_end( fn is_epoch_end(
&self, &self,
chain_head: &Header, chain_head: &Header,
_chain: &super::Headers, _chain: &super::Headers<Header>,
transition_store: &super::PendingTransitionStore, transition_store: &super::PendingTransitionStore,
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
let first = chain_head.number() == 0; let first = chain_head.number() == 0;
@ -657,14 +659,14 @@ impl Engine for Tendermint {
None None
} }
fn epoch_verifier<'a>(&self, _header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a> { fn epoch_verifier<'a>(&self, _header: &Header, proof: &'a [u8]) -> ConstructedVerifier<'a, EthereumMachine> {
let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) { let (signal_number, set_proof, finality_proof) = match destructure_proofs(proof) {
Ok(x) => x, Ok(x) => x,
Err(e) => return ConstructedVerifier::Err(e), Err(e) => return ConstructedVerifier::Err(e),
}; };
let first = signal_number == 0; let first = signal_number == 0;
match self.validators.epoch_set(first, self, signal_number, set_proof) { match self.validators.epoch_set(first, &self.machine, signal_number, set_proof) {
Ok((list, finalize)) => { Ok((list, finalize)) => {
let verifier = Box::new(EpochVerifier { let verifier = Box::new(EpochVerifier {
subchain_validators: list, subchain_validators: list,
@ -785,7 +787,7 @@ mod tests {
use tests::helpers::*; use tests::helpers::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use spec::Spec; use spec::Spec;
use engines::{Engine, EngineError, Seal}; use engines::{EthEngine, EngineError, Seal};
use engines::epoch::EpochVerifier; use engines::epoch::EpochVerifier;
use super::*; use super::*;
@ -810,7 +812,7 @@ mod tests {
} }
} }
fn vote<F>(engine: &Engine, signer: F, height: usize, view: usize, step: Step, block_hash: Option<H256>) -> Bytes where F: FnOnce(H256) -> Result<H520, ::account_provider::SignError> { fn vote<F>(engine: &EthEngine, signer: F, height: usize, view: usize, step: Step, block_hash: Option<H256>) -> Bytes where F: FnOnce(H256) -> Result<H520, ::account_provider::SignError> {
let mi = message_info_rlp(&VoteStep::new(height, view, step), block_hash); let mi = message_info_rlp(&VoteStep::new(height, view, step), block_hash);
let m = message_full_rlp(&signer(keccak(&mi)).unwrap().into(), &mi); let m = message_full_rlp(&signer(keccak(&mi)).unwrap().into(), &mi);
engine.handle_message(&m).unwrap(); engine.handle_message(&m).unwrap();
@ -834,7 +836,7 @@ mod tests {
addr addr
} }
fn insert_and_register(tap: &Arc<AccountProvider>, engine: &Engine, acc: &str) -> Address { fn insert_and_register(tap: &Arc<AccountProvider>, engine: &EthEngine, acc: &str) -> Address {
let addr = insert_and_unlock(tap, acc); let addr = insert_and_unlock(tap, acc);
engine.set_signer(tap.clone(), addr.clone(), acc.into()); engine.set_signer(tap.clone(), addr.clone(), acc.into());
addr addr
@ -871,7 +873,7 @@ mod tests {
let engine = Spec::new_test_tendermint().engine; let engine = Spec::new_test_tendermint().engine;
let header = Header::default(); let header = Header::default();
let verify_result = engine.verify_block_basic(&header, None); let verify_result = engine.verify_block_basic(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {}, Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
@ -896,14 +898,14 @@ mod tests {
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Good proposer. // Good proposer.
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_external(&header).is_ok());
let validator = insert_and_unlock(&tap, "0"); let validator = insert_and_unlock(&tap, "0");
header.set_author(validator); header.set_author(validator);
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Bad proposer. // Bad proposer.
match engine.verify_block_external(&header, None) { match engine.verify_block_external(&header) {
Err(Error::Engine(EngineError::NotProposer(_))) => {}, Err(Error::Engine(EngineError::NotProposer(_))) => {},
_ => panic!(), _ => panic!(),
} }
@ -913,7 +915,7 @@ mod tests {
let seal = proposal_seal(&tap, &header, 0); let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal); header.set_seal(seal);
// Not authority. // Not authority.
match engine.verify_block_external(&header, None) { match engine.verify_block_external(&header) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(), _ => panic!(),
}; };
@ -943,7 +945,7 @@ mod tests {
header.set_seal(seal.clone()); header.set_seal(seal.clone());
// One good signature is not enough. // One good signature is not enough.
match engine.verify_block_external(&header, None) { match engine.verify_block_external(&header) {
Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {}, Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {},
_ => panic!(), _ => panic!(),
} }
@ -954,7 +956,7 @@ mod tests {
seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec();
header.set_seal(seal.clone()); header.set_seal(seal.clone());
assert!(engine.verify_block_external(&header, None).is_ok()); assert!(engine.verify_block_external(&header).is_ok());
let bad_voter = insert_and_unlock(&tap, "101"); let bad_voter = insert_and_unlock(&tap, "101");
let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap(); let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap();
@ -963,7 +965,7 @@ mod tests {
header.set_seal(seal); header.set_seal(seal);
// One good and one bad signature. // One good and one bad signature.
match engine.verify_block_external(&header, None) { match engine.verify_block_external(&header) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(), _ => panic!(),
}; };


@ -18,6 +18,7 @@
use ethjson; use ethjson;
use time::Duration; use time::Duration;
use bigint::prelude::U256;
use super::super::validator_set::{ValidatorSet, new_validator_set}; use super::super::validator_set::{ValidatorSet, new_validator_set};
use super::super::transition::Timeouts; use super::super::transition::Timeouts;
use super::Step; use super::Step;
@ -28,6 +29,8 @@ pub struct TendermintParams {
pub validators: Box<ValidatorSet>, pub validators: Box<ValidatorSet>,
/// Timeout durations for different steps. /// Timeout durations for different steps.
pub timeouts: TendermintTimeouts, pub timeouts: TendermintTimeouts,
/// Reward per block in base units.
pub block_reward: U256,
} }
/// Base timeout of each step in ms. /// Base timeout of each step in ms.
@ -81,6 +84,7 @@ impl From<ethjson::spec::TendermintParams> for TendermintParams {
precommit: p.timeout_precommit.map_or(dt.precommit, to_duration), precommit: p.timeout_precommit.map_or(dt.precommit, to_duration),
commit: p.timeout_commit.map_or(dt.commit, to_duration), commit: p.timeout_commit.map_or(dt.commit, to_duration),
}, },
block_reward: p.block_reward.map_or(U256::default(), Into::into),
} }
} }
} }
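Editorial note: the new `block_reward` field defaults to zero when the JSON spec omits it, via the usual `map_or(default, Into::into)` conversion. A tiny sketch of that conversion pattern with stand-in spec/param types (the type and field names here are invented for the example, not ethjson's):

    // What the deserialised JSON side might look like: everything optional.
    struct JsonParams { block_reward: Option<u64> }

    // The engine-facing params: concrete values with defaults already applied.
    struct TendermintishParams { block_reward: u128 }

    impl From<JsonParams> for TendermintishParams {
        fn from(p: JsonParams) -> Self {
            TendermintishParams {
                // Missing in the spec file => default (zero) reward.
                block_reward: p.block_reward.map_or(u128::default(), Into::into),
            }
        }
    }

    fn main() {
        let explicit: TendermintishParams = JsonParams { block_reward: Some(5) }.into();
        let defaulted: TendermintishParams = JsonParams { block_reward: None }.into();
        assert_eq!(explicit.block_reward, 5);
        assert_eq!(defaulted.block_reward, 0);
    }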


@ -20,6 +20,7 @@ use std::sync::Weak;
use time::Duration; use time::Duration;
use io::{IoContext, IoHandler, TimerToken}; use io::{IoContext, IoHandler, TimerToken};
use engines::Engine; use engines::Engine;
use parity_machine::Machine;
/// Timeouts lookup /// Timeouts lookup
pub trait Timeouts<S: Sync + Send + Clone>: Send + Sync { pub trait Timeouts<S: Sync + Send + Clone>: Send + Sync {
@ -31,14 +32,14 @@ pub trait Timeouts<S: Sync + Send + Clone>: Send + Sync {
} }
/// Timeout transition handling. /// Timeout transition handling.
pub struct TransitionHandler<S: Sync + Send + Clone> { pub struct TransitionHandler<S: Sync + Send + Clone, M: Machine> {
engine: Weak<Engine>, engine: Weak<Engine<M>>,
timeouts: Box<Timeouts<S>>, timeouts: Box<Timeouts<S>>,
} }
impl<S> TransitionHandler<S> where S: Sync + Send + Clone { impl<S, M: Machine> TransitionHandler<S, M> where S: Sync + Send + Clone {
/// New step caller by timeouts. /// New step caller by timeouts.
pub fn new(engine: Weak<Engine>, timeouts: Box<Timeouts<S>>) -> Self { pub fn new(engine: Weak<Engine<M>>, timeouts: Box<Timeouts<S>>) -> Self {
TransitionHandler { TransitionHandler {
engine: engine, engine: engine,
timeouts: timeouts, timeouts: timeouts,
@ -54,7 +55,9 @@ fn set_timeout<S: Sync + Send + Clone>(io: &IoContext<S>, timeout: Duration) {
.unwrap_or_else(|e| warn!(target: "engine", "Failed to set consensus step timeout: {}.", e)) .unwrap_or_else(|e| warn!(target: "engine", "Failed to set consensus step timeout: {}.", e))
} }
impl<S> IoHandler<S> for TransitionHandler<S> where S: Sync + Send + Clone + 'static { impl<S, M> IoHandler<S> for TransitionHandler<S, M>
where S: Sync + Send + Clone + 'static, M: Machine
{
fn initialize(&self, io: &IoContext<S>) { fn initialize(&self, io: &IoContext<S>) {
let initial = self.timeouts.initial(); let initial = self.timeouts.initial();
trace!(target: "engine", "Setting the initial timeout to {}.", initial); trace!(target: "engine", "Setting the initial timeout to {}.", initial);


@ -27,8 +27,8 @@ use futures::Future;
use native_contracts::ValidatorReport as Provider; use native_contracts::ValidatorReport as Provider;
use client::EngineClient; use client::EngineClient;
use engines::{Call, Engine};
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use machine::{AuxiliaryData, Call, EthereumMachine};
use super::{ValidatorSet, SimpleList, SystemCall}; use super::{ValidatorSet, SimpleList, SystemCall};
use super::safe_contract::ValidatorSafeContract; use super::safe_contract::ValidatorSafeContract;
@ -91,14 +91,13 @@ impl ValidatorSet for ValidatorContract {
&self, &self,
first: bool, first: bool,
header: &Header, header: &Header,
block: Option<&[u8]>, aux: AuxiliaryData,
receipts: Option<&[::receipt::Receipt]>, ) -> ::engines::EpochChange<EthereumMachine> {
) -> ::engines::EpochChange { self.validators.signals_epoch_end(first, header, aux)
self.validators.signals_epoch_end(first, header, block, receipts)
} }
fn epoch_set(&self, first: bool, engine: &Engine, number: BlockNumber, proof: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> { fn epoch_set(&self, first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> {
self.validators.epoch_set(first, engine, number, proof) self.validators.epoch_set(first, machine, number, proof)
} }
fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool {
@ -182,7 +181,7 @@ mod tests {
header.set_parent_hash(client.chain_info().best_block_hash); header.set_parent_hash(client.chain_info().best_block_hash);
// `reportBenign` when the designated proposer releases block from the future (bad clock). // `reportBenign` when the designated proposer releases block from the future (bad clock).
assert!(client.engine().verify_block_external(&header, None).is_err()); assert!(client.engine().verify_block_external(&header).is_err());
// Seal a block. // Seal a block.
client.engine().step(); client.engine().step();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
@ -190,7 +189,7 @@ mod tests {
assert_eq!(client.call_contract(BlockId::Latest, validator_contract, "d8f2e0bf".from_hex().unwrap()).unwrap().to_hex(), "0000000000000000000000007d577a597b2742b498cb5cf0c26cdcd726d39e6e"); assert_eq!(client.call_contract(BlockId::Latest, validator_contract, "d8f2e0bf".from_hex().unwrap()).unwrap().to_hex(), "0000000000000000000000007d577a597b2742b498cb5cf0c26cdcd726d39e6e");
// Simulate a misbehaving validator by handling a double proposal. // Simulate a misbehaving validator by handling a double proposal.
let header = client.best_block_header().decode(); let header = client.best_block_header().decode();
assert!(client.engine().verify_block_family(&header, &header, None).is_err()); assert!(client.engine().verify_block_family(&header, &header).is_err());
// Seal a block. // Seal a block.
client.engine().step(); client.engine().step();
client.engine().step(); client.engine().step();


@ -31,6 +31,7 @@ use bytes::Bytes;
use ethjson::spec::ValidatorSet as ValidatorSpec; use ethjson::spec::ValidatorSet as ValidatorSpec;
use client::EngineClient; use client::EngineClient;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use machine::{AuxiliaryData, Call, EthereumMachine};
#[cfg(test)] #[cfg(test)]
pub use self::test::TestSet; pub use self::test::TestSet;
@ -39,8 +40,6 @@ use self::contract::ValidatorContract;
use self::safe_contract::ValidatorSafeContract; use self::safe_contract::ValidatorSafeContract;
use self::multi::Multi; use self::multi::Multi;
use super::{Call, Engine};
/// A system-calling closure. Enacts calls on a block's state from the system address. /// A system-calling closure. Enacts calls on a block's state from the system address.
pub type SystemCall<'a> = FnMut(Address, Bytes) -> Result<Bytes, String> + 'a; pub type SystemCall<'a> = FnMut(Address, Bytes) -> Result<Bytes, String> + 'a;
@ -113,9 +112,8 @@ pub trait ValidatorSet: Send + Sync {
&self, &self,
first: bool, first: bool,
header: &Header, header: &Header,
block: Option<&[u8]>, aux: AuxiliaryData,
receipts: Option<&[::receipt::Receipt]>, ) -> ::engines::EpochChange<EthereumMachine>;
) -> ::engines::EpochChange;
/// Recover the validator set from the given proof, the block number, and /// Recover the validator set from the given proof, the block number, and
/// whether this header is first in its set. /// whether this header is first in its set.
@ -125,7 +123,7 @@ pub trait ValidatorSet: Send + Sync {
/// ///
/// Returns the set, along with a flag indicating whether finality of a specific /// Returns the set, along with a flag indicating whether finality of a specific
/// hash should be proven. /// hash should be proven.
fn epoch_set(&self, first: bool, engine: &Engine, number: BlockNumber, proof: &[u8]) fn epoch_set(&self, first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8])
-> Result<(SimpleList, Option<H256>), ::error::Error>; -> Result<(SimpleList, Option<H256>), ::error::Error>;
/// Checks if a given address is a validator, with the given function /// Checks if a given address is a validator, with the given function


@ -18,7 +18,6 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::sync::Weak; use std::sync::Weak;
use engines::{Call, Engine};
use bigint::hash::H256; use bigint::hash::H256;
use parking_lot::RwLock; use parking_lot::RwLock;
use util::Address; use util::Address;
@ -26,6 +25,7 @@ use bytes::Bytes;
use ids::BlockId; use ids::BlockId;
use header::{BlockNumber, Header}; use header::{BlockNumber, Header};
use client::EngineClient; use client::EngineClient;
use machine::{AuxiliaryData, Call, EthereumMachine};
use super::{SystemCall, ValidatorSet}; use super::{SystemCall, ValidatorSet};
type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>; type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
@ -93,20 +93,20 @@ impl ValidatorSet for Multi {
set.is_epoch_end(first, chain_head) set.is_epoch_end(first, chain_head)
} }
fn signals_epoch_end(&self, _first: bool, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>) fn signals_epoch_end(&self, _first: bool, header: &Header, aux: AuxiliaryData)
-> ::engines::EpochChange -> ::engines::EpochChange<EthereumMachine>
{ {
let (set_block, set) = self.correct_set_by_number(header.number()); let (set_block, set) = self.correct_set_by_number(header.number());
let first = set_block == header.number(); let first = set_block == header.number();
set.signals_epoch_end(first, header, block, receipts) set.signals_epoch_end(first, header, aux)
} }
fn epoch_set(&self, _first: bool, engine: &Engine, number: BlockNumber, proof: &[u8]) -> Result<(super::SimpleList, Option<H256>), ::error::Error> { fn epoch_set(&self, _first: bool, machine: &EthereumMachine, number: BlockNumber, proof: &[u8]) -> Result<(super::SimpleList, Option<H256>), ::error::Error> {
let (set_block, set) = self.correct_set_by_number(number); let (set_block, set) = self.correct_set_by_number(number);
let first = set_block == number; let first = set_block == number;
set.epoch_set(first, engine, number, proof) set.epoch_set(first, machine, number, proof)
} }
fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool { fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool {
@ -227,7 +227,7 @@ mod tests {
let mut header = Header::new(); let mut header = Header::new();
header.set_number(499); header.set_number(499);
match multi.signals_epoch_end(false, &header, None, None) { match multi.signals_epoch_end(false, &header, Default::default()) {
EpochChange::No => {}, EpochChange::No => {},
_ => panic!("Expected no epoch signal change."), _ => panic!("Expected no epoch signal change."),
} }
@ -235,7 +235,7 @@ mod tests {
header.set_number(500); header.set_number(500);
match multi.signals_epoch_end(false, &header, None, None) { match multi.signals_epoch_end(false, &header, Default::default()) {
EpochChange::No => {}, EpochChange::No => {},
_ => panic!("Expected no epoch signal change."), _ => panic!("Expected no epoch signal change."),
} }


@ -33,7 +33,7 @@ use rlp::{UntrustedRlp, RlpStream};
use basic_types::LogBloom; use basic_types::LogBloom;
use client::EngineClient; use client::EngineClient;
use engines::{Call, Engine}; use machine::{AuxiliaryData, Call, EthereumMachine, AuxiliaryRequest};
use header::Header; use header::Header;
use ids::BlockId; use ids::BlockId;
use log_entry::LogEntry; use log_entry::LogEntry;
@ -58,19 +58,19 @@ struct StateProof {
provider: Provider, provider: Provider,
} }
impl ::engines::StateDependentProof for StateProof { impl ::engines::StateDependentProof<EthereumMachine> for StateProof {
fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String> { fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String> {
prove_initial(&self.provider, &*self.header.lock(), caller) prove_initial(&self.provider, &*self.header.lock(), caller)
} }
fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String> { fn check_proof(&self, machine: &EthereumMachine, proof: &[u8]) -> Result<(), String> {
let (header, state_items) = decode_first_proof(&UntrustedRlp::new(proof)) let (header, state_items) = decode_first_proof(&UntrustedRlp::new(proof))
.map_err(|e| format!("proof incorrectly encoded: {}", e))?; .map_err(|e| format!("proof incorrectly encoded: {}", e))?;
if &header != &*self.header.lock(){ if &header != &*self.header.lock(){
return Err("wrong header in proof".into()); return Err("wrong header in proof".into());
} }
check_first_proof(engine, &self.provider, header, &state_items).map(|_| ()) check_first_proof(machine, &self.provider, header, &state_items).map(|_| ())
} }
} }
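Editorial note: `StateDependentProof` splits epoch proofs into two halves: generating the proof (which needs a way to read current state) and later re-checking it (which only needs the machine). A bare-bones sketch of that generate/check round trip with stand-in types, just to show the shape of the contract (the "proof" here is trivially the chain id; real proofs carry headers and trie nodes):

    // Stand-in types for illustration only.
    struct Machine { chain_id: u64 }

    trait StateDependentProof {
        // Produce a proof using some way of reading current state.
        fn generate_proof(&self, read_state: &dyn Fn() -> Vec<u8>) -> Result<Vec<u8>, String>;
        // Later, re-check a stored proof against the machine alone.
        fn check_proof(&self, machine: &Machine, proof: &[u8]) -> Result<(), String>;
    }

    struct ChainIdProof;

    impl StateDependentProof for ChainIdProof {
        fn generate_proof(&self, read_state: &dyn Fn() -> Vec<u8>) -> Result<Vec<u8>, String> {
            Ok(read_state())
        }
        fn check_proof(&self, machine: &Machine, proof: &[u8]) -> Result<(), String> {
            if proof == &machine.chain_id.to_le_bytes()[..] {
                Ok(())
            } else {
                Err("proof does not match machine".into())
            }
        }
    }

    fn main() {
        let machine = Machine { chain_id: 42 };
        let prover = ChainIdProof;
        let proof = prover.generate_proof(&|| 42u64.to_le_bytes().to_vec()).unwrap();
        assert!(prover.check_proof(&machine, &proof).is_ok());
    }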
@@ -94,7 +94,7 @@ fn encode_first_proof(header: &Header, state_items: &[Vec<u8>]) -> Bytes {
}
// check a first proof: fetch the validator set at the given block.
- fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, state_items: &[DBValue])
+ fn check_first_proof(machine: &EthereumMachine, provider: &Provider, old_header: Header, state_items: &[DBValue])
-> Result<Vec<Address>, String>
{
use transaction::{Action, Transaction};
@@ -117,12 +117,12 @@ fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, s
gas_used: 0.into(),
};
- // check state proof using given engine.
+ // check state proof using given machine.
let number = old_header.number();
provider.get_validators(move |a, d| {
let from = Address::default();
let tx = Transaction {
- nonce: engine.account_start_nonce(number),
+ nonce: machine.account_start_nonce(number),
action: Action::Call(a),
gas: PROVIDED_GAS.into(),
gas_price: U256::default(),
@@ -134,7 +134,7 @@ fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, s
state_items,
*old_header.state_root(),
&tx,
- engine,
+ machine,
&env_info,
);
@@ -336,9 +336,11 @@ impl ValidatorSet for ValidatorSafeContract {
None // no immediate transitions to contract.
}
- fn signals_epoch_end(&self, first: bool, header: &Header, _block: Option<&[u8]>, receipts: Option<&[Receipt]>)
+ fn signals_epoch_end(&self, first: bool, header: &Header, aux: AuxiliaryData)
- -> ::engines::EpochChange
+ -> ::engines::EpochChange<EthereumMachine>
{
+ let receipts = aux.receipts;
// transition to the first block of a contract requires finality but has no log event.
if first {
debug!(target: "engine", "signalling transition to fresh contract.");
@@ -358,7 +360,7 @@ impl ValidatorSet for ValidatorSafeContract {
trace!(target: "engine", "detected epoch change event bloom");
match receipts {
- None => ::engines::EpochChange::Unsure(::engines::Unsure::NeedsReceipts),
+ None => ::engines::EpochChange::Unsure(AuxiliaryRequest::Receipts),
Some(receipts) => match self.extract_from_event(bloom, header, receipts) {
None => ::engines::EpochChange::No,
Some(list) => {
@@ -372,7 +374,7 @@ impl ValidatorSet for ValidatorSafeContract {
}
}
- fn epoch_set(&self, first: bool, engine: &Engine, _number: ::header::BlockNumber, proof: &[u8])
+ fn epoch_set(&self, first: bool, machine: &EthereumMachine, _number: ::header::BlockNumber, proof: &[u8])
-> Result<(SimpleList, Option<H256>), ::error::Error>
{
let rlp = UntrustedRlp::new(proof);
@@ -383,7 +385,7 @@ impl ValidatorSet for ValidatorSafeContract {
let (old_header, state_items) = decode_first_proof(&rlp)?;
let number = old_header.number();
let old_hash = old_header.hash();
- let addresses = check_first_proof(engine, &self.provider, old_header, &state_items)
+ let addresses = check_first_proof(machine, &self.provider, old_header, &state_items)
.map_err(::engines::EngineError::InsufficientProof)?;
trace!(target: "engine", "extracted epoch set at #{}: {} addresses",
@@ -561,7 +563,8 @@ mod tests {
#[test]
fn detects_bloom() {
use header::Header;
- use engines::{EpochChange, Unsure};
+ use engines::EpochChange;
+ use machine::AuxiliaryRequest;
use log_entry::LogEntry;
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None);
@@ -581,7 +584,7 @@ mod tests {
};
new_header.set_log_bloom(event.bloom());
- match engine.signals_epoch_end(&new_header, None, None) {
+ match engine.signals_epoch_end(&new_header, Default::default()) {
EpochChange::No => {},
_ => panic!("Expected bloom to be unrecognized."),
};
@@ -590,8 +593,8 @@ mod tests {
event.topics.push(last_hash);
new_header.set_log_bloom(event.bloom());
- match engine.signals_epoch_end(&new_header, None, None) {
+ match engine.signals_epoch_end(&new_header, Default::default()) {
- EpochChange::Unsure(Unsure::NeedsReceipts) => {},
+ EpochChange::Unsure(AuxiliaryRequest::Receipts) => {},
_ => panic!("Expected bloom to be recognized."),
};
}
@@ -607,7 +610,7 @@ mod tests {
let mut new_header = Header::default();
new_header.set_number(0); // so the validator set doesn't look for a log
- match engine.signals_epoch_end(&new_header, None, None) {
+ match engine.signals_epoch_end(&new_header, Default::default()) {
EpochChange::Yes(Proof::WithState(_)) => {},
_ => panic!("Expected state to be required to prove initial signal"),
};
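The `ValidatorSafeContract` hunks above replace the separate `Option` arguments of `signals_epoch_end` with a single `AuxiliaryData` bundle and answer `Unsure(AuxiliaryRequest::Receipts)` when receipts were not supplied. A rough, self-contained sketch of that calling convention, using stand-in types rather than ethcore's real `machine` and `engines` modules (the `matches_bloom` field in particular is invented for the example):

```rust
// Minimal stand-ins for the ethcore types referenced in the hunks above; field and
// variant names follow the diff, but these are illustrative definitions only.
struct Receipt {
    matches_bloom: bool,
}

#[derive(Default)]
struct AuxiliaryData<'a> {
    receipts: Option<&'a [Receipt]>,
}

enum AuxiliaryRequest {
    Receipts,
}

enum EpochChange {
    No,
    Unsure(AuxiliaryRequest),
    Yes,
}

// Shape of the new `signals_epoch_end` flow: a fresh contract signals immediately,
// otherwise the set asks for receipts when they were not handed over up front.
fn signals_epoch_end(first: bool, aux: AuxiliaryData) -> EpochChange {
    if first {
        return EpochChange::Yes;
    }
    match aux.receipts {
        None => EpochChange::Unsure(AuxiliaryRequest::Receipts),
        Some(receipts) if receipts.iter().any(|r| r.matches_bloom) => EpochChange::Yes,
        Some(_) => EpochChange::No,
    }
}

fn main() {
    // Mirrors the updated test call sites: no auxiliary data yet, so receipts are requested.
    match signals_epoch_end(false, Default::default()) {
        EpochChange::Unsure(AuxiliaryRequest::Receipts) => println!("receipts requested"),
        _ => panic!("expected a receipts request"),
    }
}
```

Passing `Default::default()` is what lets the tests above stay terse: an empty `AuxiliaryData` simply means "nothing fetched yet".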


@@ -20,7 +20,7 @@ use heapsize::HeapSizeOf;
use bigint::hash::H256;
use util::Address;
- use engines::{Call, Engine};
+ use machine::{AuxiliaryData, Call, EthereumMachine};
use header::{BlockNumber, Header};
use super::ValidatorSet;
@@ -76,13 +76,13 @@ impl ValidatorSet for SimpleList {
}
}
- fn signals_epoch_end(&self, _: bool, _: &Header, _: Option<&[u8]>, _: Option<&[::receipt::Receipt]>)
+ fn signals_epoch_end(&self, _: bool, _: &Header, _: AuxiliaryData)
- -> ::engines::EpochChange
+ -> ::engines::EpochChange<EthereumMachine>
{
::engines::EpochChange::No
}
- fn epoch_set(&self, _first: bool, _: &Engine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> {
+ fn epoch_set(&self, _first: bool, _: &EthereumMachine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> {
Ok((self.clone(), None))
}


@@ -24,7 +24,7 @@ use bigint::hash::H256;
use util::Address;
use bytes::Bytes;
- use engines::{Call, Engine};
+ use machine::{AuxiliaryData, Call, EthereumMachine};
use header::{Header, BlockNumber};
use super::{ValidatorSet, SimpleList};
@@ -58,13 +58,13 @@ impl ValidatorSet for TestSet {
fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option<Vec<u8>> { None }
- fn signals_epoch_end(&self, _: bool, _: &Header, _: Option<&[u8]>, _: Option<&[::receipt::Receipt]>)
+ fn signals_epoch_end(&self, _: bool, _: &Header, _: AuxiliaryData)
- -> ::engines::EpochChange
+ -> ::engines::EpochChange<EthereumMachine>
{
::engines::EpochChange::No
}
- fn epoch_set(&self, _: bool, _: &Engine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> {
+ fn epoch_set(&self, _: bool, _: &EthereumMachine, _: BlockNumber, _: &[u8]) -> Result<(SimpleList, Option<H256>), ::error::Error> {
Ok((self.validator.clone(), None))
}


@@ -17,33 +17,20 @@
use std::path::Path;
use std::cmp;
use std::collections::{BTreeMap, HashMap};
- use std::sync::{Arc, Weak};
+ use std::sync::Arc;
use hash::{KECCAK_EMPTY_LIST_RLP};
- use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager};
+ use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
use bigint::prelude::U256;
use bigint::hash::{H256, H64};
- use util::*;
use unexpected::{OutOfBounds, Mismatch};
use block::*;
- use builtin::Builtin;
+ use error::{BlockError, Error};
- use vm::EnvInfo;
+ use header::Header;
- use error::{BlockError, Error, TransactionError};
+ use engines::{self, Engine, EthEngine};
- use trace::{Tracer, ExecutiveTracer, RewardType};
- use header::{Header, BlockNumber};
- use state::CleanupMode;
- use spec::CommonParams;
- use transaction::{UnverifiedTransaction, SignedTransaction};
- use engines::{self, Engine};
- use evm::Schedule;
use ethjson;
use rlp::{self, UntrustedRlp};
- use vm::LastHashes;
+ use machine::EthereumMachine;
use semantic_version::SemanticVersion;
- use tx_filter::{TransactionFilter};
- use client::EngineClient;
- /// Parity tries to round block.gas_limit to multiple of this constant
- pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]);
/// Number of blocks in an ethash snapshot. /// Number of blocks in an ethash snapshot.
// make dependent on difficulty incrment divisor? // make dependent on difficulty incrment divisor?
@ -68,12 +55,6 @@ pub struct EthashParams {
pub duration_limit: u64, pub duration_limit: u64,
/// Homestead transition block number. /// Homestead transition block number.
pub homestead_transition: u64, pub homestead_transition: u64,
/// DAO hard-fork transition block (X).
pub dao_hardfork_transition: u64,
/// DAO hard-fork refund contract address (C).
pub dao_hardfork_beneficiary: Address,
/// DAO hard-fork DAO accounts list (L)
pub dao_hardfork_accounts: Vec<Address>,
/// Transition block for a change of difficulty params (currently just bound_divisor). /// Transition block for a change of difficulty params (currently just bound_divisor).
pub difficulty_hardfork_transition: u64, pub difficulty_hardfork_transition: u64,
/// Difficulty param after the difficulty transition. /// Difficulty param after the difficulty transition.
@ -82,30 +63,14 @@ pub struct EthashParams {
pub bomb_defuse_transition: u64, pub bomb_defuse_transition: u64,
/// Number of first block where EIP-100 rules begin. /// Number of first block where EIP-100 rules begin.
pub eip100b_transition: u64, pub eip100b_transition: u64,
/// Number of first block where EIP-150 rules begin.
pub eip150_transition: u64,
/// Number of first block where EIP-160 rules begin.
pub eip160_transition: u64,
/// Number of first block where EIP-161.abc begin.
pub eip161abc_transition: u64,
/// Number of first block where EIP-161.d begins.
pub eip161d_transition: u64,
/// Number of first block where ECIP-1010 begins. /// Number of first block where ECIP-1010 begins.
pub ecip1010_pause_transition: u64, pub ecip1010_pause_transition: u64,
/// Number of first block where ECIP-1010 ends. /// Number of first block where ECIP-1010 ends.
pub ecip1010_continue_transition: u64, pub ecip1010_continue_transition: u64,
/// Total block number for one ECIP-1017 era. /// Total block number for one ECIP-1017 era.
pub ecip1017_era_rounds: u64, pub ecip1017_era_rounds: u64,
/// Maximum amount of code that can be deploying into a contract. /// Block reward in base units.
pub max_code_size: u64, pub block_reward: U256,
/// Number of first block where the max gas limit becomes effective.
pub max_gas_limit_transition: u64,
/// Maximum valid block gas limit,
pub max_gas_limit: U256,
/// Number of first block where the minimum gas price becomes effective.
pub min_gas_price_transition: u64,
/// Do not alow transactions with lower gas price.
pub min_gas_price: U256,
/// EIP-649 transition block. /// EIP-649 transition block.
pub eip649_transition: u64, pub eip649_transition: u64,
/// EIP-649 bomb delay. /// EIP-649 bomb delay.
@ -123,25 +88,14 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
metropolis_difficulty_increment_divisor: p.metropolis_difficulty_increment_divisor.map_or(9, Into::into), metropolis_difficulty_increment_divisor: p.metropolis_difficulty_increment_divisor.map_or(9, Into::into),
duration_limit: p.duration_limit.map_or(0, Into::into), duration_limit: p.duration_limit.map_or(0, Into::into),
homestead_transition: p.homestead_transition.map_or(0, Into::into), homestead_transition: p.homestead_transition.map_or(0, Into::into),
dao_hardfork_transition: p.dao_hardfork_transition.map_or(u64::max_value(), Into::into),
dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(u64::max_value(), Into::into), difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(u64::max_value(), Into::into),
difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into), difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into),
bomb_defuse_transition: p.bomb_defuse_transition.map_or(u64::max_value(), Into::into), bomb_defuse_transition: p.bomb_defuse_transition.map_or(u64::max_value(), Into::into),
eip100b_transition: p.eip100b_transition.map_or(u64::max_value(), Into::into), eip100b_transition: p.eip100b_transition.map_or(u64::max_value(), Into::into),
eip150_transition: p.eip150_transition.map_or(0, Into::into),
eip160_transition: p.eip160_transition.map_or(0, Into::into),
eip161abc_transition: p.eip161abc_transition.map_or(0, Into::into),
eip161d_transition: p.eip161d_transition.map_or(u64::max_value(), Into::into),
ecip1010_pause_transition: p.ecip1010_pause_transition.map_or(u64::max_value(), Into::into), ecip1010_pause_transition: p.ecip1010_pause_transition.map_or(u64::max_value(), Into::into),
ecip1010_continue_transition: p.ecip1010_continue_transition.map_or(u64::max_value(), Into::into), ecip1010_continue_transition: p.ecip1010_continue_transition.map_or(u64::max_value(), Into::into),
ecip1017_era_rounds: p.ecip1017_era_rounds.map_or(u64::max_value(), Into::into), ecip1017_era_rounds: p.ecip1017_era_rounds.map_or(u64::max_value(), Into::into),
max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into), block_reward: p.block_reward.map_or_else(Default::default, Into::into),
max_gas_limit_transition: p.max_gas_limit_transition.map_or(u64::max_value(), Into::into),
max_gas_limit: p.max_gas_limit.map_or(U256::max_value(), Into::into),
min_gas_price_transition: p.min_gas_price_transition.map_or(u64::max_value(), Into::into),
min_gas_price: p.min_gas_price.map_or(U256::zero(), Into::into),
eip649_transition: p.eip649_transition.map_or(u64::max_value(), Into::into), eip649_transition: p.eip649_transition.map_or(u64::max_value(), Into::into),
eip649_delay: p.eip649_delay.map_or(DEFAULT_EIP649_DELAY, Into::into), eip649_delay: p.eip649_delay.map_or(DEFAULT_EIP649_DELAY, Into::into),
eip649_reward: p.eip649_reward.map(Into::into), eip649_reward: p.eip649_reward.map(Into::into),
@@ -152,27 +106,23 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
/// Engine using Ethash proof-of-work consensus algorithm, suitable for Ethereum
/// mainnet chains in the Olympic, Frontier and Homestead eras.
pub struct Ethash {
- params: CommonParams,
ethash_params: EthashParams,
- builtins: BTreeMap<Address, Builtin>,
pow: EthashManager,
- tx_filter: Option<TransactionFilter>,
+ machine: EthereumMachine,
}
impl Ethash {
/// Create a new instance of Ethash engine
- pub fn new<T: AsRef<Path>>(
+ pub fn new<T: Into<Option<OptimizeFor>>>(
- cache_dir: T,
+ cache_dir: &Path,
- params: CommonParams,
ethash_params: EthashParams,
- builtins: BTreeMap<Address, Builtin>,
+ machine: EthereumMachine,
+ optimize_for: T,
) -> Arc<Self> {
Arc::new(Ethash {
- tx_filter: TransactionFilter::from_params(&params),
- params,
ethash_params,
- builtins,
+ machine,
- pow: EthashManager::new(cache_dir),
+ pow: EthashManager::new(cache_dir.as_ref(), optimize_for.into()),
})
}
}
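The constructor now takes `T: Into<Option<OptimizeFor>>`, so call sites can pass either `None` or a bare `OptimizeFor` value, as the updated difficulty tests further down do. A small self-contained sketch of that argument-ergonomics pattern (`new_manager` and its behaviour are illustrative assumptions, not the real ethash API):

```rust
// Stand-in for the `OptimizeFor` hint imported from the ethash crate above.
#[derive(Debug, Clone, Copy, PartialEq)]
enum OptimizeFor {
    Cpu,
    Memory,
}

impl Default for OptimizeFor {
    fn default() -> Self {
        OptimizeFor::Cpu
    }
}

// Accepting `Into<Option<OptimizeFor>>` lets callers write `new_manager(None)` or
// `new_manager(OptimizeFor::Memory)` without wrapping the value in `Some(..)`.
fn new_manager<T: Into<Option<OptimizeFor>>>(optimize_for: T) -> OptimizeFor {
    optimize_for.into().unwrap_or_default()
}

fn main() {
    assert_eq!(new_manager(None), OptimizeFor::Cpu);
    assert_eq!(new_manager(OptimizeFor::Memory), OptimizeFor::Memory);
}
```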
@@ -185,26 +135,23 @@ impl Ethash {
// for any block in the chain.
// in the future, we might move the Ethash epoch
// caching onto this mechanism as well.
- impl engines::EpochVerifier for Arc<Ethash> {
+ impl engines::EpochVerifier<EthereumMachine> for Arc<Ethash> {
fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) }
fn verify_heavy(&self, header: &Header) -> Result<(), Error> {
- self.verify_block_unordered(header, None)
+ self.verify_block_unordered(header)
}
}
- impl Engine for Arc<Ethash> {
+ impl Engine<EthereumMachine> for Arc<Ethash> {
fn name(&self) -> &str { "Ethash" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
- // Two fields - mix
+ fn machine(&self) -> &EthereumMachine { &self.machine }
+ // Two fields - nonce and mix.
fn seal_fields(&self) -> usize { 2 }
- fn params(&self) -> &CommonParams { &self.params }
fn additional_params(&self) -> HashMap<String, String> { hash_map!["registrar".to_owned() => self.params().registrar.hex()] }
- fn builtins(&self) -> &BTreeMap<Address, Builtin> {
- &self.builtins
- }
/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
if header.seal().len() == self.seal_fields() {
@ -217,90 +164,16 @@ impl Engine for Arc<Ethash> {
} }
} }
- fn schedule(&self, block_number: BlockNumber) -> Schedule {
+ fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
trace!(target: "client", "Creating schedule. fCML={}, bGCML={}", self.ethash_params.homestead_transition, self.ethash_params.eip150_transition);
if block_number < self.ethash_params.homestead_transition {
Schedule::new_frontier()
} else if block_number < self.ethash_params.eip150_transition {
Schedule::new_homestead()
} else {
/// There's no max_code_size transition so we tie it to eip161abc
let max_code_size = if block_number >= self.ethash_params.eip161abc_transition { self.ethash_params.max_code_size as usize } else { usize::max_value() };
let mut schedule = Schedule::new_post_eip150(
max_code_size,
block_number >= self.ethash_params.eip160_transition,
block_number >= self.ethash_params.eip161abc_transition,
block_number >= self.ethash_params.eip161d_transition);
self.params().update_schedule(block_number, &mut schedule);
schedule
}
}
fn signing_chain_id(&self, env_info: &EnvInfo) -> Option<u64> {
if env_info.number >= self.params().eip155_transition {
Some(self.params().chain_id)
} else {
None
}
}
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, mut gas_ceil_target: U256) {
let difficulty = self.calculate_difficulty(header, parent); let difficulty = self.calculate_difficulty(header, parent);
if header.number() >= self.ethash_params.max_gas_limit_transition && gas_ceil_target > self.ethash_params.max_gas_limit {
warn!("Gas limit target is limited to {}", self.ethash_params.max_gas_limit);
gas_ceil_target = self.ethash_params.max_gas_limit;
}
let gas_limit = {
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
let lower_limit = gas_limit - gas_limit / bound_divisor + 1.into();
let upper_limit = gas_limit + gas_limit / bound_divisor - 1.into();
let gas_limit = if gas_limit < gas_floor_target {
let gas_limit = cmp::min(gas_floor_target, upper_limit);
round_block_gas_limit(gas_limit, lower_limit, upper_limit)
} else if gas_limit > gas_ceil_target {
let gas_limit = cmp::max(gas_ceil_target, lower_limit);
round_block_gas_limit(gas_limit, lower_limit, upper_limit)
} else {
let total_lower_limit = cmp::max(lower_limit, gas_floor_target);
let total_upper_limit = cmp::min(upper_limit, gas_ceil_target);
let gas_limit = cmp::max(gas_floor_target, cmp::min(total_upper_limit,
lower_limit + (header.gas_used().clone() * 6.into() / 5.into()) / bound_divisor));
round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit)
};
// ensure that we are not violating protocol limits
debug_assert!(gas_limit >= lower_limit);
debug_assert!(gas_limit <= upper_limit);
gas_limit
};
header.set_difficulty(difficulty); header.set_difficulty(difficulty);
header.set_gas_limit(gas_limit);
if header.number() >= self.ethash_params.dao_hardfork_transition &&
header.number() <= self.ethash_params.dao_hardfork_transition + 9 {
header.set_extra_data(b"dao-hard-fork"[..].to_owned());
}
header.note_dirty();
// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number(), header.difficulty(), header.gas_limit());
} }
fn on_new_block( fn on_new_block(
&self, &self,
- block: &mut ExecutedBlock,
+ _block: &mut ExecutedBlock,
last_hashes: Arc<LastHashes>,
_begins_epoch: bool, _begins_epoch: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let parent_hash = block.fields().header.parent_hash().clone();
engines::common::push_last_hash(block, last_hashes, self, &parent_hash)?;
if block.fields().header.number() == self.ethash_params.dao_hardfork_transition {
let state = block.fields_mut().state;
for child in &self.ethash_params.dao_hardfork_accounts {
let beneficiary = &self.ethash_params.dao_hardfork_beneficiary;
state.balance(child)
.and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty))?;
}
}
Ok(()) Ok(())
} }
@@ -308,68 +181,54 @@ impl Engine for Arc<Ethash> {
/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
use std::ops::Shr;
- let tracing_enabled = block.tracing_enabled();
+ use parity_machine::{LiveBlock, WithBalances};
- let fields = block.fields_mut();
- let reward = if fields.header.number() >= self.ethash_params.eip649_transition {
+ let author = *LiveBlock::header(&*block).author();
- self.ethash_params.eip649_reward.unwrap_or(self.params().block_reward)
+ let number = LiveBlock::header(&*block).number();
+ let reward = if number >= self.ethash_params.eip649_transition {
+ self.ethash_params.eip649_reward.unwrap_or(self.ethash_params.block_reward)
} else {
- self.params().block_reward
+ self.ethash_params.block_reward
};
let eras_rounds = self.ethash_params.ecip1017_era_rounds;
- let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, reward, fields.header.number());
+ let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, reward, number);
- let mut tracer = ExecutiveTracer::default();
+ let n_uncles = LiveBlock::uncles(&*block).len();
// Bestow block reward
- let result_block_reward = reward + reward.shr(5) * U256::from(fields.uncles.len());
+ let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles);
- fields.state.add_balance(
+ let mut uncle_rewards = Vec::with_capacity(n_uncles);
- fields.header.author(),
- &result_block_reward,
- CleanupMode::NoEmpty
- )?;
- if tracing_enabled {
+ self.machine.add_balance(block, &author, &result_block_reward)?;
- let block_author = fields.header.author().clone();
- tracer.trace_reward(block_author, result_block_reward, RewardType::Block);
- }
- // Bestow uncle rewards
+ // bestow uncle rewards.
- let current_number = fields.header.number();
+ for u in LiveBlock::uncles(&*block) {
- for u in fields.uncles.iter() {
+ let uncle_author = u.author();
- let uncle_author = u.author().clone();
+ let result_uncle_reward = if eras == 0 {
- let result_uncle_reward: U256;
+ (reward * U256::from(8 + u.number() - number)).shr(3)
- if eras == 0 {
- result_uncle_reward = (reward * U256::from(8 + u.number() - current_number)).shr(3);
- fields.state.add_balance(
- u.author(),
- &result_uncle_reward,
- CleanupMode::NoEmpty
- )
} else {
- result_uncle_reward = reward.shr(5);
+ reward.shr(5)
- fields.state.add_balance(
+ };
- u.author(),
- &result_uncle_reward,
- CleanupMode::NoEmpty
- )
- }?;
- // Trace uncle rewards
+ uncle_rewards.push((*uncle_author, result_uncle_reward));
- if tracing_enabled {
- tracer.trace_reward(uncle_author, result_uncle_reward, RewardType::Uncle);
- }
}
- // Commit state so that we can actually figure out the state root.
+ for &(ref a, ref reward) in &uncle_rewards {
- fields.state.commit()?;
+ self.machine.add_balance(block, a, reward)?;
- if tracing_enabled {
- fields.traces.as_mut().map(|mut traces| traces.push(tracer.drain()));
- }
- Ok(())
- }
- fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
+ // note and trace.
+ self.machine.note_rewards(block, &[(author, result_block_reward)], &uncle_rewards)
+ }
+ fn verify_local_seal(&self, header: &Header) -> Result<(), Error> {
+ self.verify_block_basic(header)
+ .and_then(|_| self.verify_block_unordered(header))
+ }
+ fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
// check the seal fields.
if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
@ -394,12 +253,6 @@ impl Engine for Arc<Ethash> {
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty }))); return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
} }
if header.number() >= self.ethash_params.dao_hardfork_transition &&
header.number() <= self.ethash_params.dao_hardfork_transition + 9 &&
header.extra_data()[..] != b"dao-hard-fork"[..] {
return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: None, found: 0 })));
}
if header.gas_limit() > &0x7fffffffffffffffu64.into() { if header.gas_limit() > &0x7fffffffffffffffu64.into() {
return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit().clone() }))); return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit().clone() })));
} }
@ -407,7 +260,7 @@ impl Engine for Arc<Ethash> {
Ok(()) Ok(())
} }
- fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
+ fn verify_block_unordered(&self, header: &Header) -> Result<(), Error> {
if header.seal().len() != self.seal_fields() { if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity( return Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: self.seal_fields(), found: header.seal().len() } Mismatch { expected: self.seal_fields(), found: header.seal().len() }
@ -416,7 +269,7 @@ impl Engine for Arc<Ethash> {
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, header.nonce().low_u64()); let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, header.nonce().low_u64());
let mix = H256(result.mix_hash); let mix = H256(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&H256(result.value)); let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
- trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_get_seedhash(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
+ trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_hash_block_number(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
if mix != header.mix_hash() { if mix != header.mix_hash() {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() }))); return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
} }
@ -426,7 +279,7 @@ impl Engine for Arc<Ethash> {
Ok(()) Ok(())
} }
- fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
+ fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> {
// we should not calculate difficulty for genesis blocks // we should not calculate difficulty for genesis blocks
if header.number() == 0 { if header.number() == 0 {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))); return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
@ -437,81 +290,17 @@ impl Engine for Arc<Ethash> {
if header.difficulty() != &expected_difficulty { if header.difficulty() != &expected_difficulty {
return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() }))) return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() })))
} }
let gas_limit_divisor = self.params().gas_limit_bound_divisor;
let parent_gas_limit = *parent.gas_limit();
let min_gas = parent_gas_limit - parent_gas_limit / gas_limit_divisor;
let max_gas = parent_gas_limit + parent_gas_limit / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
if header.number() >= self.ethash_params.max_gas_limit_transition && header.gas_limit() > &self.ethash_params.max_gas_limit && header.gas_limit() > &parent_gas_limit {
return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(self.ethash_params.max_gas_limit), found: header.gas_limit().clone() })));
}
Ok(()) Ok(())
} }
- fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
+ fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a, EthereumMachine> {
if header.number() >= self.ethash_params.min_gas_price_transition && t.gas_price < self.ethash_params.min_gas_price {
return Err(TransactionError::InsufficientGasPrice { minimal: self.ethash_params.min_gas_price, got: t.gas_price }.into());
}
let check_low_s = header.number() >= self.ethash_params.homestead_transition;
let chain_id = if header.number() >= self.params().eip155_transition { Some(self.params().chain_id) } else { None };
t.verify_basic(check_low_s, chain_id, false)?;
Ok(())
}
fn verify_transaction(&self, t: UnverifiedTransaction, header: &Header) -> Result<SignedTransaction, Error> {
let signed = SignedTransaction::new(t)?;
if !self.tx_filter.as_ref().map_or(true, |filter| filter.transaction_allowed(header.parent_hash(), &signed)) {
return Err(From::from(TransactionError::NotAllowed));
}
Ok(signed)
}
fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a> {
engines::ConstructedVerifier::Trusted(Box::new(self.clone())) engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
} }
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> { fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS))) Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS)))
} }
fn register_client(&self, client: Weak<EngineClient>) {
if let Some(ref filter) = self.tx_filter {
filter.register_client(client);
}
}
}
// Try to round gas_limit a bit so that:
// 1) it will still be in desired range
// 2) it will be a nearest (with tendency to increase) multiple of PARITY_GAS_LIMIT_DETERMINANT
fn round_block_gas_limit(gas_limit: U256, lower_limit: U256, upper_limit: U256) -> U256 {
let increased_gas_limit = gas_limit + (PARITY_GAS_LIMIT_DETERMINANT - gas_limit % PARITY_GAS_LIMIT_DETERMINANT);
if increased_gas_limit > upper_limit {
let decreased_gas_limit = increased_gas_limit - PARITY_GAS_LIMIT_DETERMINANT;
if decreased_gas_limit < lower_limit {
gas_limit
} else {
decreased_gas_limit
}
} else {
increased_gas_limit
}
}
fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256){
let eras = if block_number != 0 && block_number % era_rounds == 0 {
block_number / era_rounds - 1
} else {
block_number / era_rounds
};
for _ in 0..eras {
reward = reward / U256::from(5) * U256::from(4);
}
(eras, reward)
} }
#[cfg_attr(feature="dev", allow(wrong_self_convention))] #[cfg_attr(feature="dev", allow(wrong_self_convention))]
@@ -610,7 +399,7 @@
}
impl Header {
- /// Get the none field of the header.
+ /// Get the nonce field of the header.
pub fn nonce(&self) -> H64 {
rlp::decode(&self.seal()[1])
}
@@ -619,29 +408,34 @@
pub fn mix_hash(&self) -> H256 {
rlp::decode(&self.seal()[0])
}
+ }
- /// Set the nonce and mix hash fields of the header.
- pub fn set_nonce_and_mix_hash(&mut self, nonce: &H64, mix_hash: &H256) {
- self.set_seal(vec![rlp::encode(mix_hash).into_vec(), rlp::encode(nonce).into_vec()]);
+ fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) {
+ let eras = if block_number != 0 && block_number % era_rounds == 0 {
+ block_number / era_rounds - 1
+ } else {
+ block_number / era_rounds
+ };
+ for _ in 0..eras {
+ reward = reward / U256::from(5) * U256::from(4);
+ }
+ (eras, reward)
}
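Putting the rewritten `on_close_block` and the relocated `ecip1017_eras_block_reward` together: the base (or EIP-649) reward is cut by 20% per completed ECIP-1017 era, the block author additionally receives `reward >> 5` per included uncle, and in era 0 each uncle author receives `reward * (8 + uncle_number - block_number) >> 3`. A self-contained sketch of that arithmetic with plain `u128` values standing in for the `U256` math above:

```rust
// Era decay as in `ecip1017_eras_block_reward`: 20% off per completed era.
fn era_reward(era_rounds: u64, mut reward: u128, block_number: u64) -> (u64, u128) {
    let eras = if block_number != 0 && block_number % era_rounds == 0 {
        block_number / era_rounds - 1
    } else {
        block_number / era_rounds
    };
    for _ in 0..eras {
        reward = reward / 5 * 4;
    }
    (eras, reward)
}

fn main() {
    let base: u128 = 5_000_000_000_000_000_000; // the 5-unit base reward, 0x4563918244F40000 wei

    // Block 10_000_000 with 5M-block eras sits in era 1: the reward drops to 4 units.
    let (eras, reward) = era_reward(5_000_000, base, 10_000_000);
    assert_eq!(eras, 1);
    assert_eq!(reward, 4_000_000_000_000_000_000);

    // An era-0 block that includes two uncles, one of them mined at number - 1:
    let (block_number, uncle_number) = (1_000_000u64, 999_999u64);
    let n_uncles = 2u128;
    let block_author_reward = base + (base >> 5) * n_uncles;
    let uncle_author_reward = base * (8 + uncle_number as u128 - block_number as u128) >> 3;

    println!("block author: {} wei", block_author_reward); // 5.3125 units
    println!("uncle author: {} wei", uncle_author_reward); // 4.375 units
}
```

The asserted era-1 figure matches the `3782DACE9D900000` value checked by the test just below.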
#[cfg(test)]
mod tests {
use std::str::FromStr;
- use std::collections::BTreeMap;
use std::sync::Arc;
use bigint::prelude::U256;
use bigint::hash::{H64, H256};
use util::*;
use block::*;
use tests::helpers::*;
- use engines::Engine;
use error::{BlockError, Error};
use header::Header;
use spec::Spec;
- use super::super::{new_morden, new_homestead_test};
+ use super::super::{new_morden, new_homestead_test_machine};
- use super::{Ethash, EthashParams, PARITY_GAS_LIMIT_DETERMINANT, ecip1017_eras_block_reward};
+ use super::{Ethash, EthashParams, ecip1017_eras_block_reward};
use rlp;
fn test_spec() -> Spec { fn test_spec() -> Spec {
@ -660,6 +454,38 @@ mod tests {
assert_eq!(b.state().balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); assert_eq!(b.state().balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap());
} }
#[test]
fn has_valid_ecip1017_eras_block_reward() {
let eras_rounds = 5000000;
let start_reward: U256 = "4563918244F40000".parse().unwrap();
let block_number = 0;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(0, eras);
assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward);
let block_number = 5000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(0, eras);
assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward);
let block_number = 10000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(1, eras);
assert_eq!(U256::from_str("3782DACE9D900000").unwrap(), reward);
let block_number = 20000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(3, eras);
assert_eq!(U256::from_str("2386F26FC1000000").unwrap(), reward);
let block_number = 80000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(15, eras);
assert_eq!(U256::from_str("271000000000000").unwrap(), reward);
}
#[test] #[test]
fn on_close_block_with_uncle() { fn on_close_block_with_uncle() {
let spec = test_spec(); let spec = test_spec();
@ -698,10 +524,9 @@ mod tests {
#[test] #[test]
fn can_do_seal_verification_fail() { fn can_do_seal_verification_fail() {
let engine = test_spec().engine; let engine = test_spec().engine;
//let engine = Ethash::new_test(test_spec());
let header: Header = Header::default(); let header: Header = Header::default();
- let verify_result = engine.verify_block_basic(&header, None);
+ let verify_result = engine.verify_block_basic(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {}, Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
@ -716,7 +541,7 @@ mod tests {
let mut header: Header = Header::default(); let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]); header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
- let verify_result = engine.verify_block_basic(&header, None);
+ let verify_result = engine.verify_block_basic(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::DifficultyOutOfBounds(_))) => {}, Err(Error::Block(BlockError::DifficultyOutOfBounds(_))) => {},
@ -732,7 +557,7 @@ mod tests {
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]); header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap()); header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
- let verify_result = engine.verify_block_basic(&header, None);
+ let verify_result = engine.verify_block_basic(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidProofOfWork(_))) => {}, Err(Error::Block(BlockError::InvalidProofOfWork(_))) => {},
@ -746,7 +571,7 @@ mod tests {
let engine = test_spec().engine; let engine = test_spec().engine;
let header: Header = Header::default(); let header: Header = Header::default();
- let verify_result = engine.verify_block_unordered(&header, None);
+ let verify_result = engine.verify_block_unordered(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {}, Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
@ -760,7 +585,7 @@ mod tests {
let engine = test_spec().engine; let engine = test_spec().engine;
let mut header: Header = Header::default(); let mut header: Header = Header::default();
header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]); header.set_seal(vec![rlp::encode(&H256::zero()).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
- let verify_result = engine.verify_block_unordered(&header, None);
+ let verify_result = engine.verify_block_unordered(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::MismatchedH256SealElement(_))) => {}, Err(Error::Block(BlockError::MismatchedH256SealElement(_))) => {},
@ -776,7 +601,7 @@ mod tests {
header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")).into_vec(), rlp::encode(&H64::zero()).into_vec()]); header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap()); header.set_difficulty(U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa").unwrap());
- let verify_result = engine.verify_block_unordered(&header, None);
+ let verify_result = engine.verify_block_unordered(&header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidProofOfWork(_))) => {}, Err(Error::Block(BlockError::InvalidProofOfWork(_))) => {},
@ -791,7 +616,7 @@ mod tests {
let header: Header = Header::default(); let header: Header = Header::default();
let parent_header: Header = Header::default(); let parent_header: Header = Header::default();
- let verify_result = engine.verify_block_family(&header, &parent_header, None);
+ let verify_result = engine.verify_block_family(&header, &parent_header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::RidiculousNumber(_))) => {}, Err(Error::Block(BlockError::RidiculousNumber(_))) => {},
@ -808,7 +633,7 @@ mod tests {
let mut parent_header: Header = Header::default(); let mut parent_header: Header = Header::default();
parent_header.set_number(1); parent_header.set_number(1);
- let verify_result = engine.verify_block_family(&header, &parent_header, None);
+ let verify_result = engine.verify_block_family(&header, &parent_header);
match verify_result { match verify_result {
Err(Error::Block(BlockError::InvalidDifficulty(_))) => {}, Err(Error::Block(BlockError::InvalidDifficulty(_))) => {},
@ -817,24 +642,6 @@ mod tests {
} }
} }
#[test]
fn can_verify_block_family_gas_fail() {
let engine = test_spec().engine;
let mut header: Header = Header::default();
header.set_number(2);
header.set_difficulty(U256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap());
let mut parent_header: Header = Header::default();
parent_header.set_number(1);
let verify_result = engine.verify_block_family(&header, &parent_header, None);
match verify_result {
Err(Error::Block(BlockError::InvalidGasLimit(_))) => {},
Err(_) => { panic!("should be invalid difficulty fail (got {:?})", verify_result); },
_ => { panic!("Should be error, got Ok"); },
}
}
#[test] #[test]
fn test_difficulty_to_boundary() { fn test_difficulty_to_boundary() {
// result of f(0) is undefined, so do not assert the result // result of f(0) is undefined, so do not assert the result
@ -847,9 +654,9 @@ mod tests {
#[test]
fn difficulty_frontier() {
- let spec = new_homestead_test();
+ let machine = new_homestead_test_machine();
let ethparams = get_default_ethash_params();
- let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
+ let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut parent_header = Header::default(); let mut parent_header = Header::default();
parent_header.set_number(1000000); parent_header.set_number(1000000);
@ -865,9 +672,9 @@ mod tests {
#[test]
fn difficulty_homestead() {
- let spec = new_homestead_test();
+ let machine = new_homestead_test_machine();
let ethparams = get_default_ethash_params();
- let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
+ let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut parent_header = Header::default(); let mut parent_header = Header::default();
parent_header.set_number(1500000); parent_header.set_number(1500000);
@ -881,46 +688,14 @@ mod tests {
assert_eq!(U256::from_str("1fc50f118efe").unwrap(), difficulty); assert_eq!(U256::from_str("1fc50f118efe").unwrap(), difficulty);
} }
#[test]
fn has_valid_ecip1017_eras_block_reward() {
let eras_rounds = 5000000;
let start_reward: U256 = "4563918244F40000".parse().unwrap();
let block_number = 0;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(0, eras);
assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward);
let block_number = 5000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(0, eras);
assert_eq!(U256::from_str("4563918244F40000").unwrap(), reward);
let block_number = 10000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(1, eras);
assert_eq!(U256::from_str("3782DACE9D900000").unwrap(), reward);
let block_number = 20000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(3, eras);
assert_eq!(U256::from_str("2386F26FC1000000").unwrap(), reward);
let block_number = 80000000;
let (eras, reward) = ecip1017_eras_block_reward(eras_rounds, start_reward, block_number);
assert_eq!(15, eras);
assert_eq!(U256::from_str("271000000000000").unwrap(), reward);
}
#[test]
fn difficulty_classic_bomb_delay() {
- let spec = new_homestead_test();
+ let machine = new_homestead_test_machine();
let ethparams = EthashParams {
ecip1010_pause_transition: 3000000,
..get_default_ethash_params()
};
- let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
+ let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut parent_header = Header::default(); let mut parent_header = Header::default();
parent_header.set_number(3500000); parent_header.set_number(3500000);
@ -948,13 +723,13 @@ mod tests {
#[test]
fn test_difficulty_bomb_continue() {
- let spec = new_homestead_test();
+ let machine = new_homestead_test_machine();
let ethparams = EthashParams {
ecip1010_pause_transition: 3000000,
ecip1010_continue_transition: 5000000,
..get_default_ethash_params()
};
- let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
+ let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut parent_header = Header::default(); let mut parent_header = Header::default();
parent_header.set_number(5000102); parent_header.set_number(5000102);
@ -996,55 +771,11 @@ mod tests {
); );
} }
#[test]
fn gas_limit_is_multiple_of_determinant() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let mut parent = Header::new();
let mut header = Header::new();
header.set_number(1);
// this test will work for this constant only
assert_eq!(PARITY_GAS_LIMIT_DETERMINANT, U256::from(37));
// when parent.gas_limit < gas_floor_target:
parent.set_gas_limit(U256::from(50_000));
ethash.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(50_024));
// when parent.gas_limit > gas_ceil_target:
parent.set_gas_limit(U256::from(250_000));
ethash.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(249_787));
// when parent.gas_limit is in miner's range
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
ethash.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(150_035));
// when parent.gas_limit is in miner's range
// && we can NOT increase it to be multiple of constant
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
ethash.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(150_002));
assert_eq!(*header.gas_limit(), U256::from(149_998));
// when parent.gas_limit is in miner's range
// && we can NOT increase it to be multiple of constant
// && we can NOT decrease it to be multiple of constant
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
ethash.populate_from_parent(&mut header, &parent, U256::from(150_000), U256::from(150_002));
assert_eq!(*header.gas_limit(), U256::from(150_002));
}
#[test]
fn difficulty_max_timestamp() {
- let spec = new_homestead_test();
+ let machine = new_homestead_test_machine();
let ethparams = get_default_ethash_params();
- let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
+ let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut parent_header = Header::default(); let mut parent_header = Header::default();
parent_header.set_number(1000000); parent_header.set_number(1000000);
@ -1057,82 +788,4 @@ mod tests {
let difficulty = ethash.calculate_difficulty(&header, &parent_header); let difficulty = ethash.calculate_difficulty(&header, &parent_header);
assert_eq!(U256::from(12543204905719u64), difficulty); assert_eq!(U256::from(12543204905719u64), difficulty);
} }
#[test]
fn rejects_blocks_over_max_gas_limit() {
let spec = new_homestead_test();
let mut ethparams = get_default_ethash_params();
ethparams.max_gas_limit_transition = 10;
ethparams.max_gas_limit = 100_000.into();
let mut parent_header = Header::default();
parent_header.set_number(1);
parent_header.set_gas_limit(100_000.into());
let mut header = Header::default();
header.set_number(parent_header.number() + 1);
header.set_gas_limit(100_001.into());
header.set_difficulty(ethparams.minimum_difficulty);
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
parent_header.set_number(9);
header.set_number(parent_header.number() + 1);
parent_header.set_gas_limit(99_999.into());
header.set_gas_limit(100_000.into());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
parent_header.set_gas_limit(200_000.into());
header.set_gas_limit(200_000.into());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
parent_header.set_gas_limit(100_000.into());
header.set_gas_limit(100_001.into());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_err());
parent_header.set_gas_limit(200_000.into());
header.set_gas_limit(200_001.into());
assert!(ethash.verify_block_family(&header, &parent_header, None).is_err());
}
#[test]
fn rejects_transactions_below_min_gas_price() {
use ethkey::{Generator, Random};
use transaction::{Transaction, Action};
let spec = new_homestead_test();
let mut ethparams = get_default_ethash_params();
ethparams.min_gas_price_transition = 10;
ethparams.min_gas_price = 100000.into();
let mut header = Header::default();
header.set_number(1);
let keypair = Random.generate().unwrap();
let tx1 = Transaction {
action: Action::Create,
value: U256::zero(),
data: Vec::new(),
gas: 100_000.into(),
gas_price: 100_000.into(),
nonce: U256::zero(),
}.sign(keypair.secret(), None).into();
let tx2 = Transaction {
action: Action::Create,
value: U256::zero(),
data: Vec::new(),
gas: 100_000.into(),
gas_price: 99_999.into(),
nonce: U256::zero(),
}.sign(keypair.secret(), None).into();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
assert!(ethash.verify_transaction_basic(&tx1, &header).is_ok());
assert!(ethash.verify_transaction_basic(&tx2, &header).is_ok());
header.set_number(10);
assert!(ethash.verify_transaction_basic(&tx1, &header).is_ok());
assert!(ethash.verify_transaction_basic(&tx2, &header).is_err());
}
}


@@ -27,7 +27,7 @@ pub mod denominations;
pub use self::ethash::{Ethash};
pub use self::denominations::*;
- use std::path::Path;
+ use machine::EthereumMachine;
use super::spec::*;
/// Most recent fork block that we support on Mainnet.
@@ -39,33 +39,51 @@ pub const FORK_SUPPORTED_ROPSTEN: u64 = 10;
/// Most recent fork block that we support on Kovan.
pub const FORK_SUPPORTED_KOVAN: u64 = 0;
- fn load<'a, T: 'a + Into<Option<&'a Path>>>(cache_dir: T, b: &[u8]) -> Spec {
+ fn load<'a, T: Into<Option<SpecParams<'a>>>>(params: T, b: &[u8]) -> Spec {
- match cache_dir.into() {
+ match params.into() {
- Some(path) => Spec::load(path, b),
+ Some(params) => Spec::load(params, b),
None => Spec::load(&::std::env::temp_dir(), b)
}.expect("chain spec is invalid")
}
+ fn load_machine(b: &[u8]) -> EthereumMachine {
+ Spec::load_machine(b).expect("chain spec is invalid")
+ }
/// Create a new Foundation Olympic chain spec.
- pub fn new_olympic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/olympic.json")) }
+ pub fn new_olympic<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/olympic.json"))
+ }
/// Create a new Foundation Mainnet chain spec.
- pub fn new_foundation(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/foundation.json")) }
+ pub fn new_foundation<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/foundation.json"))
+ }
/// Create a new Classic Mainnet chain spec without the DAO hardfork.
- pub fn new_classic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/classic.json")) }
+ pub fn new_classic<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/classic.json"))
+ }
/// Create a new Expanse mainnet chain spec.
- pub fn new_expanse(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/expanse.json")) }
+ pub fn new_expanse<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/expanse.json"))
+ }
/// Create a new Kovan testnet chain spec.
- pub fn new_kovan(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/kovan.json")) }
+ pub fn new_kovan<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/kovan.json"))
+ }
/// Create a new Foundation Ropsten chain spec.
- pub fn new_ropsten(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/ropsten.json")) }
+ pub fn new_ropsten<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/ropsten.json"))
+ }
/// Create a new Morden chain spec.
- pub fn new_morden(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/morden.json")) }
+ pub fn new_morden<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
+ load(params.into(), include_bytes!("../../res/ethereum/morden.json"))
+ }
// For tests
@@ -93,6 +111,20 @@ pub fn new_byzantium_test() -> Spec { load(None, include_bytes!("../../res/ether
/// Create a new Foundation Constantinople era spec.
pub fn new_constantinople_test() -> Spec { load(None, include_bytes!("../../res/ethereum/constantinople_test.json")) }
+ // For tests
+ /// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead.
+ pub fn new_frontier_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/frontier_test.json")) }
+ /// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier.
+ pub fn new_homestead_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/homestead_test.json")) }
+ /// Create a new Foundation Byzantium era spec.
+ pub fn new_byzantium_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/byzantium_test.json")) }
+ /// Create a new Foundation Constantinople era spec.
+ pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/constantinople_test.json")) }
#[cfg(test)]
mod tests {
use bigint::prelude::U256;


@ -23,7 +23,7 @@ use bigint::hash::H256;
use util::*; use util::*;
use bytes::{Bytes, BytesRef}; use bytes::{Bytes, BytesRef};
use state::{Backend as StateBackend, State, Substate, CleanupMode}; use state::{Backend as StateBackend, State, Substate, CleanupMode};
use engines::Engine; use machine::EthereumMachine as Machine;
use vm::EnvInfo; use vm::EnvInfo;
use error::ExecutionError; use error::ExecutionError;
use evm::{CallType, Factory, Finalize, FinalizationResult}; use evm::{CallType, Factory, Finalize, FinalizationResult};
@ -154,10 +154,8 @@ impl TransactOptions<trace::NoopTracer, trace::NoopVMTracer> {
} }
} }
pub fn executor<E>(engine: &E, vm_factory: &Factory, params: &ActionParams) pub fn executor(machine: &Machine, vm_factory: &Factory, params: &ActionParams) -> Box<vm::Vm> {
-> Box<vm::Vm> where E: Engine + ?Sized if machine.supports_wasm() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) {
{
if engine.supports_wasm() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) {
Box::new( Box::new(
wasm::WasmInterpreter::new() wasm::WasmInterpreter::new()
// prefer to fail fast // prefer to fail fast
@ -169,32 +167,32 @@ pub fn executor<E>(engine: &E, vm_factory: &Factory, params: &ActionParams)
} }
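The reworked `executor` above picks the wasm interpreter based purely on a four-byte code prefix. A minimal standalone sketch of that predicate, assuming `WASM_MAGIC_NUMBER` is the standard `\0asm` wasm magic (its definition lives elsewhere in this module and is not shown in this hunk):

// Illustrative only: mirrors the condition used by `executor` above.
fn looks_like_wasm(code: &[u8]) -> bool {
	const WASM_MAGIC: &'static [u8; 4] = b"\0asm"; // assumed value of WASM_MAGIC_NUMBER
	code.len() > 4 && &code[0..4] == &WASM_MAGIC[..]
}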
/// Transaction executor. /// Transaction executor.
pub struct Executive<'a, B: 'a + StateBackend, E: 'a + Engine + ?Sized> { pub struct Executive<'a, B: 'a + StateBackend> {
state: &'a mut State<B>, state: &'a mut State<B>,
info: &'a EnvInfo, info: &'a EnvInfo,
engine: &'a E, machine: &'a Machine,
depth: usize, depth: usize,
static_flag: bool, static_flag: bool,
} }
impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { impl<'a, B: 'a + StateBackend> Executive<'a, B> {
/// Basic constructor. /// Basic constructor.
pub fn new(state: &'a mut State<B>, info: &'a EnvInfo, engine: &'a E) -> Self { pub fn new(state: &'a mut State<B>, info: &'a EnvInfo, machine: &'a Machine) -> Self {
Executive { Executive {
state: state, state: state,
info: info, info: info,
engine: engine, machine: machine,
depth: 0, depth: 0,
static_flag: false, static_flag: false,
} }
} }
/// Populates executive from parent properties. Increments executive depth. /// Populates executive from parent properties. Increments executive depth.
pub fn from_parent(state: &'a mut State<B>, info: &'a EnvInfo, engine: &'a E, parent_depth: usize, static_flag: bool) -> Self { pub fn from_parent(state: &'a mut State<B>, info: &'a EnvInfo, machine: &'a Machine, parent_depth: usize, static_flag: bool) -> Self {
Executive { Executive {
state: state, state: state,
info: info, info: info,
engine: engine, machine: machine,
depth: parent_depth + 1, depth: parent_depth + 1,
static_flag: static_flag, static_flag: static_flag,
} }
@ -209,9 +207,9 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
tracer: &'any mut T, tracer: &'any mut T,
vm_tracer: &'any mut V, vm_tracer: &'any mut V,
static_call: bool, static_call: bool,
) -> Externalities<'any, T, V, B, E> where T: Tracer, V: VMTracer { ) -> Externalities<'any, T, V, B> where T: Tracer, V: VMTracer {
let is_static = self.static_flag || static_call; let is_static = self.static_flag || static_call;
Externalities::new(self.state, self.info, self.engine, self.depth, origin_info, substate, output, tracer, vm_tracer, is_static) Externalities::new(self.state, self.info, self.machine, self.depth, origin_info, substate, output, tracer, vm_tracer, is_static)
} }
/// This function should be used to execute transaction. /// This function should be used to execute transaction.
@ -250,7 +248,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
let sender = t.sender(); let sender = t.sender();
let nonce = self.state.nonce(&sender)?; let nonce = self.state.nonce(&sender)?;
let schedule = self.engine.schedule(self.info.number); let schedule = self.machine.schedule(self.info.number);
let base_gas_required = U256::from(t.gas_required(&schedule)); let base_gas_required = U256::from(t.gas_required(&schedule));
if t.gas < base_gas_required { if t.gas < base_gas_required {
@ -298,7 +296,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
let (result, output) = match t.action { let (result, output) = match t.action {
Action::Create => { Action::Create => {
let (new_address, code_hash) = contract_address(self.engine.create_address_scheme(self.info.number), &sender, &nonce, &t.data); let (new_address, code_hash) = contract_address(self.machine.create_address_scheme(self.info.number), &sender, &nonce, &t.data);
let params = ActionParams { let params = ActionParams {
code_address: new_address.clone(), code_address: new_address.clone(),
code_hash: code_hash, code_hash: code_hash,
@ -355,19 +353,19 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
let vm_factory = self.state.vm_factory(); let vm_factory = self.state.vm_factory();
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);
trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call); trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
return executor(self.engine, &vm_factory, &params).exec(params, &mut ext).finalize(ext); return executor(self.machine, &vm_factory, &params).exec(params, &mut ext).finalize(ext);
} }
// Start in new thread to reset stack // Start in new thread to reset stack
// TODO [todr] No thread builder yet, so we need to reset once for a while // TODO [todr] No thread builder yet, so we need to reset once for a while
// https://github.com/aturon/crossbeam/issues/16 // https://github.com/aturon/crossbeam/issues/16
crossbeam::scope(|scope| { crossbeam::scope(|scope| {
let engine = self.engine; let machine = self.machine;
let vm_factory = self.state.vm_factory(); let vm_factory = self.state.vm_factory();
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);
scope.spawn(move || { scope.spawn(move || {
executor(engine, &vm_factory, &params).exec(params, &mut ext).finalize(ext) executor(machine, &vm_factory, &params).exec(params, &mut ext).finalize(ext)
}) })
}).join() }).join()
} }
@ -396,7 +394,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
// backup used in case of running out of gas // backup used in case of running out of gas
self.state.checkpoint(); self.state.checkpoint();
let schedule = self.engine.schedule(self.info.number); let schedule = self.machine.schedule(self.info.number);
// at first, transfer value to destination // at first, transfer value to destination
if let ActionValue::Transfer(val) = params.value { if let ActionValue::Transfer(val) = params.value {
@ -404,7 +402,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
} }
// if destination is builtin, try to execute it // if destination is builtin, try to execute it
if let Some(builtin) = self.engine.builtin(&params.code_address, self.info.number) { if let Some(builtin) = self.machine.builtin(&params.code_address, self.info.number) {
// Engines aren't supposed to return builtins until activation, but // Engines aren't supposed to return builtins until activation, but
// prefer to fail rather than silently break consensus. // prefer to fail rather than silently break consensus.
if !builtin.is_active(self.info.number) { if !builtin.is_active(self.info.number) {
@ -542,7 +540,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
let mut unconfirmed_substate = Substate::new(); let mut unconfirmed_substate = Substate::new();
// create contract and transfer value to it if necessary // create contract and transfer value to it if necessary
let schedule = self.engine.schedule(self.info.number); let schedule = self.machine.schedule(self.info.number);
let nonce_offset = if schedule.no_empty {1} else {0}.into(); let nonce_offset = if schedule.no_empty {1} else {0}.into();
let prev_bal = self.state.balance(&params.address)?; let prev_bal = self.state.balance(&params.address)?;
if let ActionValue::Transfer(val) = params.value { if let ActionValue::Transfer(val) = params.value {
@ -591,7 +589,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
trace: Vec<FlatTrace>, trace: Vec<FlatTrace>,
vm_trace: Option<VMTrace> vm_trace: Option<VMTrace>
) -> ExecutionResult { ) -> ExecutionResult {
let schedule = self.engine.schedule(self.info.number); let schedule = self.machine.schedule(self.info.number);
// refunds from SSTORE nonzero -> zero // refunds from SSTORE nonzero -> zero
let sstore_refunds = U256::from(schedule.sstore_refund_gas) * substate.sstore_clears_count; let sstore_refunds = U256::from(schedule.sstore_refund_gas) * substate.sstore_clears_count;
@ -700,6 +698,7 @@ mod tests {
use vm::{ActionParams, ActionValue, CallType, EnvInfo, CreateContractAddress}; use vm::{ActionParams, ActionValue, CallType, EnvInfo, CreateContractAddress};
use evm::{Factory, VMType}; use evm::{Factory, VMType};
use error::ExecutionError; use error::ExecutionError;
use machine::EthereumMachine;
use state::{Substate, CleanupMode}; use state::{Substate, CleanupMode};
use tests::helpers::*; use tests::helpers::*;
use trace::trace; use trace::trace;
@ -707,6 +706,12 @@ mod tests {
use trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer}; use trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, VMTracer, NoopVMTracer, ExecutiveVMTracer};
use transaction::{Action, Transaction}; use transaction::{Action, Transaction};
fn make_frontier_machine(max_depth: usize) -> EthereumMachine {
let mut machine = ::ethereum::new_frontier_test_machine();
machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth));
machine
}
#[test] #[test]
fn test_contract_address() { fn test_contract_address() {
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
@ -728,11 +733,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };
@ -786,11 +791,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };
@ -842,13 +847,13 @@ mod tests {
let mut state = get_temp_state(); let mut state = get_temp_state();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let mut substate = Substate::new(); let mut substate = Substate::new();
let mut tracer = ExecutiveTracer::default(); let mut tracer = ExecutiveTracer::default();
let mut vm_tracer = ExecutiveVMTracer::toplevel(); let mut vm_tracer = ExecutiveVMTracer::toplevel();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
let output = BytesRef::Fixed(&mut[0u8;0]); let output = BytesRef::Fixed(&mut[0u8;0]);
ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap() ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap()
}; };
@ -951,13 +956,13 @@ mod tests {
let mut state = get_temp_state(); let mut state = get_temp_state();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let mut substate = Substate::new(); let mut substate = Substate::new();
let mut tracer = ExecutiveTracer::default(); let mut tracer = ExecutiveTracer::default();
let mut vm_tracer = ExecutiveVMTracer::toplevel(); let mut vm_tracer = ExecutiveVMTracer::toplevel();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params.clone(), &mut substate, &mut None, &mut tracer, &mut vm_tracer).unwrap() ex.create(params.clone(), &mut substate, &mut None, &mut tracer, &mut vm_tracer).unwrap()
}; };
@ -1038,11 +1043,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };
@ -1089,11 +1094,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(1024); let machine = make_frontier_machine(1024);
let mut substate = Substate::new(); let mut substate = Substate::new();
{ {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap(); ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap();
} }
@ -1149,11 +1154,11 @@ mod tests {
state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };
@ -1193,11 +1198,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.init_code(&address, code).unwrap(); state.init_code(&address, code).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let FinalizationResult { gas_left, .. } = { let FinalizationResult { gas_left, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };
@ -1226,10 +1231,10 @@ mod tests {
state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let executed = { let executed = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
let opts = TransactOptions::with_no_tracing(); let opts = TransactOptions::with_no_tracing();
ex.transact(&t, opts).unwrap() ex.transact(&t, opts).unwrap()
}; };
@ -1263,10 +1268,10 @@ mod tests {
state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
let opts = TransactOptions::with_no_tracing(); let opts = TransactOptions::with_no_tracing();
ex.transact(&t, opts) ex.transact(&t, opts)
}; };
@ -1296,10 +1301,10 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_used = U256::from(20_000); info.gas_used = U256::from(20_000);
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
let opts = TransactOptions::with_no_tracing(); let opts = TransactOptions::with_no_tracing();
ex.transact(&t, opts) ex.transact(&t, opts)
}; };
@ -1329,10 +1334,10 @@ mod tests {
state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
let opts = TransactOptions::with_no_tracing(); let opts = TransactOptions::with_no_tracing();
ex.transact(&t, opts) ex.transact(&t, opts)
}; };
@ -1362,11 +1367,11 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let machine = make_frontier_machine(0);
let mut substate = Substate::new(); let mut substate = Substate::new();
let result = { let result = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer)
}; };
@ -1397,12 +1402,12 @@ mod tests {
let mut state = get_temp_state_with_factory(factory); let mut state = get_temp_state_with_factory(factory);
state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new_byzantium(); let machine = ::ethereum::new_byzantium_test_machine();
let mut substate = Substate::new(); let mut substate = Substate::new();
let mut output = [0u8; 14]; let mut output = [0u8; 14];
let FinalizationResult { gas_left: result, .. } = { let FinalizationResult { gas_left: result, .. } = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &machine);
ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap() ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap()
}; };


@ -22,7 +22,7 @@ use bigint::hash::H256;
use util::*; use util::*;
use bytes::{Bytes, BytesRef}; use bytes::{Bytes, BytesRef};
use state::{Backend as StateBackend, State, Substate, CleanupMode}; use state::{Backend as StateBackend, State, Substate, CleanupMode};
use engines::Engine; use machine::EthereumMachine as Machine;
use executive::*; use executive::*;
use vm::{ use vm::{
self, ActionParams, ActionValue, EnvInfo, CallType, Schedule, self, ActionParams, ActionValue, EnvInfo, CallType, Schedule,
@ -65,12 +65,12 @@ impl OriginInfo {
} }
/// Implementation of evm Externalities. /// Implementation of evm Externalities.
pub struct Externalities<'a, T: 'a, V: 'a, B: 'a, E: 'a + Engine + ?Sized> pub struct Externalities<'a, T: 'a, V: 'a, B: 'a>
where T: Tracer, V: VMTracer, B: StateBackend where T: Tracer, V: VMTracer, B: StateBackend
{ {
state: &'a mut State<B>, state: &'a mut State<B>,
env_info: &'a EnvInfo, env_info: &'a EnvInfo,
engine: &'a E, machine: &'a Machine,
depth: usize, depth: usize,
origin_info: OriginInfo, origin_info: OriginInfo,
substate: &'a mut Substate, substate: &'a mut Substate,
@ -81,14 +81,14 @@ pub struct Externalities<'a, T: 'a, V: 'a, B: 'a, E: 'a + Engine + ?Sized>
static_flag: bool, static_flag: bool,
} }
impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Externalities<'a, T, V, B, E> impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B>
where T: Tracer, V: VMTracer, B: StateBackend, E: Engine + ?Sized where T: Tracer, V: VMTracer, B: StateBackend
{ {
/// Basic `Externalities` constructor. /// Basic `Externalities` constructor.
#[cfg_attr(feature="dev", allow(too_many_arguments))] #[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn new(state: &'a mut State<B>, pub fn new(state: &'a mut State<B>,
env_info: &'a EnvInfo, env_info: &'a EnvInfo,
engine: &'a E, machine: &'a Machine,
depth: usize, depth: usize,
origin_info: OriginInfo, origin_info: OriginInfo,
substate: &'a mut Substate, substate: &'a mut Substate,
@ -100,11 +100,11 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Externalities<'a, T, V, B, E>
Externalities { Externalities {
state: state, state: state,
env_info: env_info, env_info: env_info,
engine: engine, machine: machine,
depth: depth, depth: depth,
origin_info: origin_info, origin_info: origin_info,
substate: substate, substate: substate,
schedule: engine.schedule(env_info.number), schedule: machine.schedule(env_info.number),
output: output, output: output,
tracer: tracer, tracer: tracer,
vm_tracer: vm_tracer, vm_tracer: vm_tracer,
@ -113,8 +113,8 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Externalities<'a, T, V, B, E>
} }
} }
impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E> impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
where T: Tracer, V: VMTracer, B: StateBackend, E: Engine + ?Sized where T: Tracer, V: VMTracer, B: StateBackend
{ {
fn storage_at(&self, key: &H256) -> vm::Result<H256> { fn storage_at(&self, key: &H256) -> vm::Result<H256> {
self.state.storage_at(&self.origin_info.address, key).map_err(Into::into) self.state.storage_at(&self.origin_info.address, key).map_err(Into::into)
@ -149,8 +149,8 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
} }
fn blockhash(&mut self, number: &U256) -> H256 { fn blockhash(&mut self, number: &U256) -> H256 {
if self.env_info.number + 256 >= self.engine.params().eip210_transition { if self.env_info.number + 256 >= self.machine.params().eip210_transition {
let blockhash_contract_address = self.engine.params().eip210_contract_address; let blockhash_contract_address = self.machine.params().eip210_contract_address;
let code_res = self.state.code(&blockhash_contract_address) let code_res = self.state.code(&blockhash_contract_address)
.and_then(|code| self.state.code_hash(&blockhash_contract_address).map(|hash| (code, hash))); .and_then(|code| self.state.code_hash(&blockhash_contract_address).map(|hash| (code, hash)));
@ -165,7 +165,7 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
value: ActionValue::Apparent(self.origin_info.value), value: ActionValue::Apparent(self.origin_info.value),
code_address: blockhash_contract_address.clone(), code_address: blockhash_contract_address.clone(),
origin: self.origin_info.origin.clone(), origin: self.origin_info.origin.clone(),
gas: self.engine.params().eip210_contract_gas, gas: self.machine.params().eip210_contract_gas,
gas_price: 0.into(), gas_price: 0.into(),
code: code, code: code,
code_hash: Some(code_hash), code_hash: Some(code_hash),
@ -174,7 +174,7 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
}; };
let mut output = H256::new(); let mut output = H256::new();
let mut ex = Executive::new(self.state, self.env_info, self.engine); let mut ex = Executive::new(self.state, self.env_info, self.machine);
let r = ex.call(params, self.substate, BytesRef::Fixed(&mut output), self.tracer, self.vm_tracer); let r = ex.call(params, self.substate, BytesRef::Fixed(&mut output), self.tracer, self.vm_tracer);
trace!("ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", number, r, output, self.env_info.number); trace!("ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", number, r, output, self.env_info.number);
output output
@ -229,7 +229,7 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
} }
} }
} }
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth, self.static_flag); let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.depth, self.static_flag);
// TODO: handle internal error separately // TODO: handle internal error separately
match ex.create(params, self.substate, &mut None, self.tracer, self.vm_tracer) { match ex.create(params, self.substate, &mut None, self.tracer, self.vm_tracer) {
@ -283,7 +283,7 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
params.value = ActionValue::Transfer(value); params.value = ActionValue::Transfer(value);
} }
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth, self.static_flag); let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.depth, self.static_flag);
match ex.call(params, self.substate, BytesRef::Fixed(output), self.tracer, self.vm_tracer) { match ex.call(params, self.substate, BytesRef::Fixed(output), self.tracer, self.vm_tracer) {
Ok(FinalizationResult{ gas_left, return_data, apply_state: true }) => MessageCallResult::Success(gas_left, return_data), Ok(FinalizationResult{ gas_left, return_data, apply_state: true }) => MessageCallResult::Success(gas_left, return_data),
@ -414,7 +414,6 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E>
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use util::*; use util::*;
use engines::Engine;
use evm::{EnvInfo, Ext, CallType}; use evm::{EnvInfo, Ext, CallType};
use state::{State, Substate}; use state::{State, Substate};
use tests::helpers::*; use tests::helpers::*;
@ -444,7 +443,7 @@ mod tests {
struct TestSetup { struct TestSetup {
state: State<::state_db::StateDB>, state: State<::state_db::StateDB>,
engine: Arc<Engine>, machine: ::machine::EthereumMachine,
sub_state: Substate, sub_state: Substate,
env_info: EnvInfo env_info: EnvInfo
} }
@ -459,7 +458,7 @@ mod tests {
fn new() -> Self { fn new() -> Self {
TestSetup { TestSetup {
state: get_temp_state(), state: get_temp_state(),
engine: get_test_spec().engine, machine: ::spec::Spec::new_test_machine(),
sub_state: Substate::new(), sub_state: Substate::new(),
env_info: get_test_env_info() env_info: get_test_env_info()
} }
@ -473,7 +472,7 @@ mod tests {
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
let ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
assert_eq!(ext.env_info().number, 100); assert_eq!(ext.env_info().number, 100);
} }
@ -485,7 +484,7 @@ mod tests {
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::<U256>().unwrap()); let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::<U256>().unwrap());
@ -509,7 +508,7 @@ mod tests {
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::<U256>().unwrap()); let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::<U256>().unwrap());
@ -524,7 +523,7 @@ mod tests {
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
let mut output = vec![]; let mut output = vec![];
@ -552,7 +551,7 @@ mod tests {
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
{ {
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
ext.log(log_topics, &log_data).unwrap(); ext.log(log_topics, &log_data).unwrap();
} }
@ -569,7 +568,7 @@ mod tests {
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
{ {
let mut ext = Externalities::new(state, &setup.env_info, &*setup.engine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false);
ext.suicide(refund_account).unwrap(); ext.suicide(refund_account).unwrap();
} }


@ -312,6 +312,23 @@ impl HeapSizeOf for Header {
} }
} }
impl ::parity_machine::Header for Header {
fn bare_hash(&self) -> H256 { Header::bare_hash(self) }
fn hash(&self) -> H256 { Header::hash(self) }
fn seal(&self) -> &[Vec<u8>] { Header::seal(self) }
fn author(&self) -> &Address { Header::author(self) }
fn number(&self) -> BlockNumber { Header::number(self) }
}
impl ::parity_machine::ScoredHeader for Header {
fn score(&self) -> &U256 { self.difficulty() }
fn set_score(&mut self, score: U256) { self.set_difficulty(score) }
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rustc_hex::FromHex; use rustc_hex::FromHex;


@ -18,7 +18,6 @@ use std::sync::Arc;
use super::test_common::*; use super::test_common::*;
use state::{Backend as StateBackend, State, Substate}; use state::{Backend as StateBackend, State, Substate};
use executive::*; use executive::*;
use engines::Engine;
use evm::{VMType, Finalize}; use evm::{VMType, Finalize};
use vm::{ use vm::{
self, ActionParams, CallType, Schedule, Ext, self, ActionParams, CallType, Schedule, Ext,
@ -34,6 +33,7 @@ use bytes::{Bytes, BytesRef};
use trie; use trie;
use rlp::RlpStream; use rlp::RlpStream;
use hash::keccak; use hash::keccak;
use machine::EthereumMachine as Machine;
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
struct CallCreate { struct CallCreate {
@ -57,22 +57,22 @@ impl From<ethjson::vm::Call> for CallCreate {
/// Tiny wrapper around executive externalities. /// Tiny wrapper around executive externalities.
/// Stores callcreates. /// Stores callcreates.
struct TestExt<'a, T: 'a, V: 'a, B: 'a, E: 'a> struct TestExt<'a, T: 'a, V: 'a, B: 'a>
where T: Tracer, V: VMTracer, B: StateBackend, E: Engine + ?Sized where T: Tracer, V: VMTracer, B: StateBackend
{ {
ext: Externalities<'a, T, V, B, E>, ext: Externalities<'a, T, V, B>,
callcreates: Vec<CallCreate>, callcreates: Vec<CallCreate>,
nonce: U256, nonce: U256,
sender: Address, sender: Address,
} }
impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> TestExt<'a, T, V, B, E> impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B>
where T: Tracer, V: VMTracer, B: StateBackend, E: Engine + ?Sized where T: Tracer, V: VMTracer, B: StateBackend,
{ {
fn new( fn new(
state: &'a mut State<B>, state: &'a mut State<B>,
info: &'a EnvInfo, info: &'a EnvInfo,
engine: &'a E, machine: &'a Machine,
depth: usize, depth: usize,
origin_info: OriginInfo, origin_info: OriginInfo,
substate: &'a mut Substate, substate: &'a mut Substate,
@ -84,15 +84,15 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> TestExt<'a, T, V, B, E>
let static_call = false; let static_call = false;
Ok(TestExt { Ok(TestExt {
nonce: state.nonce(&address)?, nonce: state.nonce(&address)?,
ext: Externalities::new(state, info, engine, depth, origin_info, substate, output, tracer, vm_tracer, static_call), ext: Externalities::new(state, info, machine, depth, origin_info, substate, output, tracer, vm_tracer, static_call),
callcreates: vec![], callcreates: vec![],
sender: address, sender: address,
}) })
} }
} }
impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for TestExt<'a, T, V, B, E> impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B>
where T: Tracer, V: VMTracer, B: StateBackend, E: Engine + ?Sized where T: Tracer, V: VMTracer, B: StateBackend
{ {
fn storage_at(&self, key: &H256) -> vm::Result<H256> { fn storage_at(&self, key: &H256) -> vm::Result<H256> {
self.ext.storage_at(key) self.ext.storage_at(key)
@ -231,7 +231,12 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
let mut state = get_temp_state(); let mut state = get_temp_state();
state.populate_from(From::from(vm.pre_state.clone())); state.populate_from(From::from(vm.pre_state.clone()));
let info = From::from(vm.env); let info = From::from(vm.env);
let engine = TestEngine::new(1); let machine = {
let mut machine = ::ethereum::new_frontier_test_machine();
machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = 1));
machine
};
let params = ActionParams::from(vm.transaction); let params = ActionParams::from(vm.transaction);
let mut substate = Substate::new(); let mut substate = Substate::new();
@ -245,7 +250,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
let mut ex = try_fail!(TestExt::new( let mut ex = try_fail!(TestExt::new(
&mut state, &mut state,
&info, &info,
&engine, &machine,
0, 0,
OriginInfo::from(&params), OriginInfo::from(&params),
&mut substate, &mut substate,


@ -52,7 +52,8 @@ pub fn run_test_file(path: &Path, runner: fn (json_data: &[u8]) -> Vec<String>)
let mut file = File::open(&path).expect("Error opening test file"); let mut file = File::open(&path).expect("Error opening test file");
file.read_to_end(&mut data).expect("Error reading test file"); file.read_to_end(&mut data).expect("Error reading test file");
let results = runner(&data); let results = runner(&data);
assert!(results.is_empty()); let empty: [String; 0] = [];
assert_eq!(results, empty);
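// `assert_eq!` against an explicitly empty `[String; 0]` keeps the behaviour of the old
// `assert!(results.is_empty())` but prints the offending test names in the panic message on failure.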
} }
macro_rules! test { macro_rules! test {


@ -101,6 +101,7 @@ extern crate lru_cache;
extern crate native_contracts; extern crate native_contracts;
extern crate num_cpus; extern crate num_cpus;
extern crate num; extern crate num;
extern crate parity_machine;
extern crate parking_lot; extern crate parking_lot;
extern crate price_info; extern crate price_info;
extern crate rand; extern crate rand;
@ -154,6 +155,7 @@ pub mod error;
pub mod ethereum; pub mod ethereum;
pub mod executed; pub mod executed;
pub mod header; pub mod header;
pub mod machine;
pub mod migrations; pub mod migrations;
pub mod miner; pub mod miner;
pub mod pod_state; pub mod pod_state;

531 ethcore/src/machine.rs Normal file

@ -0,0 +1,531 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethereum-like state machine definition.
use std::collections::BTreeMap;
use std::cmp;
use std::sync::Arc;
use block::ExecutedBlock;
use builtin::Builtin;
use client::BlockChainClient;
use error::{Error, TransactionError};
use executive::Executive;
use header::{BlockNumber, Header};
use spec::CommonParams;
use state::{CleanupMode, Substate};
use trace::{NoopTracer, NoopVMTracer, Tracer, ExecutiveTracer, RewardType};
use transaction::{SYSTEM_ADDRESS, UnverifiedTransaction, SignedTransaction};
use tx_filter::TransactionFilter;
use bigint::prelude::U256;
use bytes::BytesRef;
use util::Address;
use vm::{CallType, ActionParams, ActionValue};
use vm::{EnvInfo, Schedule, CreateContractAddress};
/// Parity tries to round block.gas_limit to a multiple of this constant
pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]);
/// Ethash-specific extensions.
#[derive(Debug, Clone)]
pub struct EthashExtensions {
/// Homestead transition block number.
pub homestead_transition: BlockNumber,
/// EIP150 transition block number.
pub eip150_transition: BlockNumber,
/// Number of first block where EIP-160 rules begin.
pub eip160_transition: u64,
/// Number of first block where EIP-161.abc begin.
pub eip161abc_transition: u64,
/// Number of first block where EIP-161.d begins.
pub eip161d_transition: u64,
/// DAO hard-fork transition block (X).
pub dao_hardfork_transition: u64,
/// DAO hard-fork refund contract address (C).
pub dao_hardfork_beneficiary: Address,
/// DAO hard-fork DAO accounts list (L)
pub dao_hardfork_accounts: Vec<Address>,
}
impl From<::ethjson::spec::EthashParams> for EthashExtensions {
fn from(p: ::ethjson::spec::EthashParams) -> Self {
EthashExtensions {
homestead_transition: p.homestead_transition.map_or(0, Into::into),
eip150_transition: p.eip150_transition.map_or(0, Into::into),
eip160_transition: p.eip160_transition.map_or(0, Into::into),
eip161abc_transition: p.eip161abc_transition.map_or(0, Into::into),
eip161d_transition: p.eip161d_transition.map_or(u64::max_value(), Into::into),
dao_hardfork_transition: p.dao_hardfork_transition.map_or(u64::max_value(), Into::into),
dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
}
}
}
/// Special rules to be applied to the schedule.
pub type ScheduleCreationRules = Fn(&mut Schedule, BlockNumber) + Sync + Send;
/// An ethereum-like state machine.
pub struct EthereumMachine {
params: CommonParams,
builtins: Arc<BTreeMap<Address, Builtin>>,
tx_filter: Option<Arc<TransactionFilter>>,
ethash_extensions: Option<EthashExtensions>,
schedule_rules: Option<Box<ScheduleCreationRules>>,
}
impl EthereumMachine {
/// Regular ethereum machine.
pub fn regular(params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> EthereumMachine {
let tx_filter = TransactionFilter::from_params(&params).map(Arc::new);
EthereumMachine {
params: params,
builtins: Arc::new(builtins),
tx_filter: tx_filter,
ethash_extensions: None,
schedule_rules: None,
}
}
/// Ethereum machine with ethash extensions.
// TODO: either unify or specify to mainnet specifically and include other specific-chain HFs?
pub fn with_ethash_extensions(params: CommonParams, builtins: BTreeMap<Address, Builtin>, extensions: EthashExtensions) -> EthereumMachine {
let mut machine = EthereumMachine::regular(params, builtins);
machine.ethash_extensions = Some(extensions);
machine
}
/// Attach special rules to the creation of schedule.
pub fn set_schedule_creation_rules(&mut self, rules: Box<ScheduleCreationRules>) {
self.schedule_rules = Some(rules);
}
/// Get a reference to the ethash-specific extensions.
pub fn ethash_extensions(&self) -> Option<&EthashExtensions> {
self.ethash_extensions.as_ref()
}
}
impl EthereumMachine {
/// Execute a call as the system address.
pub fn execute_as_system(
&self,
block: &mut ExecutedBlock,
contract_address: Address,
gas: U256,
data: Option<Vec<u8>>,
) -> Result<Vec<u8>, Error> {
let env_info = {
let mut env_info = block.env_info();
env_info.gas_limit = env_info.gas_used + gas;
env_info
};
let mut state = block.fields_mut().state;
let params = ActionParams {
code_address: contract_address.clone(),
address: contract_address.clone(),
sender: SYSTEM_ADDRESS.clone(),
origin: SYSTEM_ADDRESS.clone(),
gas: gas,
gas_price: 0.into(),
value: ActionValue::Transfer(0.into()),
code: state.code(&contract_address)?,
code_hash: Some(state.code_hash(&contract_address)?),
data: data,
call_type: CallType::Call,
};
let mut ex = Executive::new(&mut state, &env_info, self);
let mut substate = Substate::new();
let mut output = Vec::new();
if let Err(e) = ex.call(params, &mut substate, BytesRef::Flexible(&mut output), &mut NoopTracer, &mut NoopVMTracer) {
warn!("Encountered error on making system call: {}", e);
}
Ok(output)
}
/// Push last known block hash to the state.
fn push_last_hash(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
let params = self.params();
if block.fields().header.number() == params.eip210_transition {
let state = block.fields_mut().state;
state.init_code(&params.eip210_contract_address, params.eip210_contract_code.clone())?;
}
if block.fields().header.number() >= params.eip210_transition {
let parent_hash = block.fields().header.parent_hash().clone();
let _ = self.execute_as_system(
block,
params.eip210_contract_address,
params.eip210_contract_gas,
Some(parent_hash.to_vec()),
)?;
}
Ok(())
}
/// Logic to perform on a new block: updating last hashes and the DAO
/// fork, for ethash.
pub fn on_new_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
self.push_last_hash(block)?;
if let Some(ref ethash_params) = self.ethash_extensions {
if block.fields().header.number() == ethash_params.dao_hardfork_transition {
let state = block.fields_mut().state;
for child in &ethash_params.dao_hardfork_accounts {
let beneficiary = &ethash_params.dao_hardfork_beneficiary;
state.balance(child)
.and_then(|b| state.transfer_balance(child, beneficiary, &b, CleanupMode::NoEmpty))?;
}
}
}
Ok(())
}
/// Populate a header's fields based on its parent's header.
/// Usually implements the chain scoring rule based on weight.
/// The gas floor target must not be lower than the engine's minimum gas limit.
pub fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, gas_ceil_target: U256) {
header.set_difficulty(parent.difficulty().clone());
if let Some(ref ethash_params) = self.ethash_extensions {
let gas_limit = {
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
let lower_limit = gas_limit - gas_limit / bound_divisor + 1.into();
let upper_limit = gas_limit + gas_limit / bound_divisor - 1.into();
let gas_limit = if gas_limit < gas_floor_target {
let gas_limit = cmp::min(gas_floor_target, upper_limit);
round_block_gas_limit(gas_limit, lower_limit, upper_limit)
} else if gas_limit > gas_ceil_target {
let gas_limit = cmp::max(gas_ceil_target, lower_limit);
round_block_gas_limit(gas_limit, lower_limit, upper_limit)
} else {
let total_lower_limit = cmp::max(lower_limit, gas_floor_target);
let total_upper_limit = cmp::min(upper_limit, gas_ceil_target);
let gas_limit = cmp::max(gas_floor_target, cmp::min(total_upper_limit,
lower_limit + (header.gas_used().clone() * 6.into() / 5.into()) / bound_divisor));
round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit)
};
// ensure that we are not violating protocol limits
debug_assert!(gas_limit >= lower_limit);
debug_assert!(gas_limit <= upper_limit);
gas_limit
};
header.set_gas_limit(gas_limit);
if header.number() >= ethash_params.dao_hardfork_transition &&
header.number() <= ethash_params.dao_hardfork_transition + 9 {
header.set_extra_data(b"dao-hard-fork"[..].to_owned());
}
return
}
header.set_gas_limit({
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.params().gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
cmp::min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
cmp::max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
});
}
/// Get the general parameters of the chain.
pub fn params(&self) -> &CommonParams {
&self.params
}
/// Get the EVM schedule for the given block number.
pub fn schedule(&self, block_number: BlockNumber) -> Schedule {
let mut schedule = match self.ethash_extensions {
None => self.params.schedule(block_number),
Some(ref ext) => {
if block_number < ext.homestead_transition {
Schedule::new_frontier()
} else if block_number < ext.eip150_transition {
Schedule::new_homestead()
} else {
// There's no max_code_size transition so we tie it to eip161abc
let max_code_size = if block_number >= ext.eip161abc_transition {
self.params.max_code_size as usize
} else {
usize::max_value()
};
let mut schedule = Schedule::new_post_eip150(
max_code_size,
block_number >= ext.eip160_transition,
block_number >= ext.eip161abc_transition,
block_number >= ext.eip161d_transition
);
self.params.update_schedule(block_number, &mut schedule);
schedule
}
}
};
if let Some(ref rules) = self.schedule_rules {
(rules)(&mut schedule, block_number)
}
schedule
}
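// The schedule_rules hook is what the test code in this diff relies on: see
// make_frontier_machine in executive.rs above and the json-test setup, both of which
// call set_schedule_creation_rules to cap s.max_depth for a frontier-era machine.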
/// Built-in contracts for the chain.
pub fn builtins(&self) -> &BTreeMap<Address, Builtin> {
&*self.builtins
}
/// Attempt to get a handle to a built-in contract.
/// Only returns references to activated built-ins.
// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
// from Spec into here and removing the Spec::builtins field.
pub fn builtin(&self, a: &Address, block_number: BlockNumber) -> Option<&Builtin> {
self.builtins()
.get(a)
.and_then(|b| if b.is_active(block_number) { Some(b) } else { None })
}
/// Some intrinsic operation parameters; by default they take their value from the `spec()`'s `engine_params`.
pub fn maximum_extra_data_size(&self) -> usize { self.params().maximum_extra_data_size }
/// The nonce with which accounts begin at given block.
pub fn account_start_nonce(&self, block: u64) -> U256 {
let params = self.params();
if block >= params.dust_protection_transition {
U256::from(params.nonce_cap_increment) * U256::from(block)
} else {
params.account_start_nonce
}
}
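// Worked example with purely illustrative parameter values (not taken from any bundled spec):
// with dust_protection_transition = 1_000_000 and nonce_cap_increment = 256,
// account_start_nonce(2_000_000) is U256::from(256) * U256::from(2_000_000) = 512_000_000,
// while account_start_nonce(999_999) still returns params.account_start_nonce.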
/// The network ID that transactions should be signed with.
pub fn signing_chain_id(&self, env_info: &EnvInfo) -> Option<u64> {
let params = self.params();
if env_info.number >= params.eip155_transition {
Some(params.chain_id)
} else {
None
}
}
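// For illustration, with mainnet-like values (chain_id = 1, eip155_transition = 2_675_000;
// assumptions here, not read from this diff), signing_chain_id returns Some(1) for blocks at
// or above the transition and None before it, i.e. pre-EIP-155, replay-unprotected signing.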
/// Returns new contract address generation scheme at given block number.
pub fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress {
if number >= self.params().eip86_transition {
CreateContractAddress::FromCodeHash
} else {
CreateContractAddress::FromSenderAndNonce
}
}
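// FromSenderAndNonce is the classic CREATE scheme (address = last 20 bytes of keccak(rlp([sender, nonce])));
// FromCodeHash is the EIP-86-style scheme gated on eip86_transition above. The actual hashing for
// either variant happens in contract_address, as used by the Action::Create branch in executive.rs.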
/// Verify a particular transaction is valid, regardless of order.
pub fn verify_transaction_unordered(&self, t: UnverifiedTransaction, _header: &Header) -> Result<SignedTransaction, Error> {
SignedTransaction::new(t)
}
/// Does basic verification of the transaction.
pub fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
let check_low_s = match self.ethash_extensions {
Some(ref ext) => header.number() >= ext.homestead_transition,
None => true,
};
let chain_id = if header.number() >= self.params().eip155_transition {
Some(self.params().chain_id)
} else {
None
};
t.verify_basic(check_low_s, chain_id, false)?;
Ok(())
}
/// Does verification of the transaction against the parent state.
// TODO: refine the bound here to be a "state provider" or similar as opposed
// to full client functionality.
pub fn verify_transaction(&self, t: &SignedTransaction, header: &Header, client: &BlockChainClient) -> Result<(), Error> {
if let Some(ref filter) = self.tx_filter.as_ref() {
if !filter.transaction_allowed(header.parent_hash(), t, client) {
return Err(TransactionError::NotAllowed.into())
}
}
Ok(())
}
/// If this machine supports wasm.
pub fn supports_wasm(&self) -> bool {
self.params().wasm
}
}
/// Auxiliary data fetcher for an Ethereum machine. In Ethereum-like machines
/// there are two kinds of auxiliary data: bodies and receipts.
#[derive(Default, Clone)]
pub struct AuxiliaryData<'a> {
/// The full block bytes, including the header.
pub bytes: Option<&'a [u8]>,
/// The block receipts.
pub receipts: Option<&'a [::receipt::Receipt]>,
}
/// Type alias for a function we can make calls through synchronously.
/// Returns the call result and state proof for each call.
pub type Call<'a> = Fn(Address, Vec<u8>) -> Result<(Vec<u8>, Vec<Vec<u8>>), String> + 'a;
/// Request for auxiliary data of a block.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AuxiliaryRequest {
/// Needs the body.
Body,
/// Needs the receipts.
Receipts,
/// Needs both body and receipts.
Both,
}
impl ::parity_machine::Machine for EthereumMachine {
type Header = Header;
type LiveBlock = ExecutedBlock;
type EngineClient = ::client::EngineClient;
type AuxiliaryRequest = AuxiliaryRequest;
type Error = Error;
}
impl<'a> ::parity_machine::LocalizedMachine<'a> for EthereumMachine {
type StateContext = Call<'a>;
type AuxiliaryData = AuxiliaryData<'a>;
}
impl ::parity_machine::WithBalances for EthereumMachine {
fn balance(&self, live: &ExecutedBlock, address: &Address) -> Result<U256, Error> {
live.fields().state.balance(address).map_err(Into::into)
}
fn add_balance(&self, live: &mut ExecutedBlock, address: &Address, amount: &U256) -> Result<(), Error> {
live.fields_mut().state.add_balance(address, amount, CleanupMode::NoEmpty).map_err(Into::into)
}
fn note_rewards(
&self,
live: &mut Self::LiveBlock,
direct: &[(Address, U256)],
indirect: &[(Address, U256)],
) -> Result<(), Self::Error> {
use block::IsBlock;
if !live.tracing_enabled() { return Ok(()) }
let mut tracer = ExecutiveTracer::default();
for &(address, amount) in direct {
tracer.trace_reward(address, amount, RewardType::Block);
}
for &(address, amount) in indirect {
tracer.trace_reward(address, amount, RewardType::Uncle);
}
live.fields_mut().push_traces(tracer);
Ok(())
}
}
// Try to round gas_limit a bit so that:
// 1) it will still be in the desired range
// 2) it will be the nearest (with a tendency to increase) multiple of PARITY_GAS_LIMIT_DETERMINANT
fn round_block_gas_limit(gas_limit: U256, lower_limit: U256, upper_limit: U256) -> U256 {
let increased_gas_limit = gas_limit + (PARITY_GAS_LIMIT_DETERMINANT - gas_limit % PARITY_GAS_LIMIT_DETERMINANT);
if increased_gas_limit > upper_limit {
let decreased_gas_limit = increased_gas_limit - PARITY_GAS_LIMIT_DETERMINANT;
if decreased_gas_limit < lower_limit {
gas_limit
} else {
decreased_gas_limit
}
} else {
increased_gas_limit
}
}
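// Worked examples for the helper in isolation (decimal for readability):
// round_block_gas_limit(100, 70, 120): 100 + (37 - 100 % 37) = 111, a multiple of 37 inside the range, so 111;
// round_block_gas_limit(100, 80, 105): 111 overshoots 105, stepping back gives 74, and since 74 < 80
// the input 100 is returned unchanged.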
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ethash_gas_limit_is_multiple_of_determinant() {
use bigint::prelude::U256;
let spec = ::ethereum::new_homestead_test();
let ethparams = ::tests::helpers::get_default_ethash_extensions();
let machine = EthereumMachine::with_ethash_extensions(
spec.params().clone(),
Default::default(),
ethparams,
);
let mut parent = ::header::Header::new();
let mut header = ::header::Header::new();
header.set_number(1);
// this test will work for this constant only
assert_eq!(PARITY_GAS_LIMIT_DETERMINANT, U256::from(37));
// when parent.gas_limit < gas_floor_target:
parent.set_gas_limit(U256::from(50_000));
machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(50_024));
// when parent.gas_limit > gas_ceil_target:
parent.set_gas_limit(U256::from(250_000));
machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(249_787));
// when parent.gas_limit is in miner's range
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(200_000));
assert_eq!(*header.gas_limit(), U256::from(150_035));
// when parent.gas_limit is in miner's range
// && we can NOT increase it to be multiple of constant
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
machine.populate_from_parent(&mut header, &parent, U256::from(100_000), U256::from(150_002));
assert_eq!(*header.gas_limit(), U256::from(149_998));
// when parent.gas_limit is in miner's range
// && we can NOT increase it to be multiple of constant
// && we can NOT decrease it to be multiple of constant
header.set_gas_used(U256::from(150_000));
parent.set_gas_limit(U256::from(150_000));
machine.populate_from_parent(&mut header, &parent, U256::from(150_000), U256::from(150_002));
assert_eq!(*header.gas_limit(), U256::from(150_002));
}
}

View File

@ -35,7 +35,7 @@ use error::*;
use transaction::{Action, UnverifiedTransaction, PendingTransaction, SignedTransaction, Condition as TransactionCondition}; use transaction::{Action, UnverifiedTransaction, PendingTransaction, SignedTransaction, Condition as TransactionCondition};
use receipt::{Receipt, RichReceipt}; use receipt::{Receipt, RichReceipt};
use spec::Spec; use spec::Spec;
use engines::{Engine, Seal}; use engines::{EthEngine, Seal};
use miner::{MinerService, MinerStatus, TransactionQueue, RemovalReason, TransactionQueueDetailsProvider, PrioritizationStrategy, use miner::{MinerService, MinerStatus, TransactionQueue, RemovalReason, TransactionQueueDetailsProvider, PrioritizationStrategy,
AccountDetails, TransactionOrigin}; AccountDetails, TransactionOrigin};
use miner::banning_queue::{BanningTransactionQueue, Threshold}; use miner::banning_queue::{BanningTransactionQueue, Threshold};
@ -240,7 +240,7 @@ pub struct Miner {
gas_range_target: RwLock<(U256, U256)>, gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>, author: RwLock<Address>,
extra_data: RwLock<Bytes>, extra_data: RwLock<Bytes>,
engine: Arc<Engine>, engine: Arc<EthEngine>,
accounts: Option<Arc<AccountProvider>>, accounts: Option<Arc<AccountProvider>>,
notifiers: RwLock<Vec<Box<NotifyWork>>>, notifiers: RwLock<Vec<Box<NotifyWork>>>,
@ -662,7 +662,7 @@ impl Miner {
return Err(Error::Transaction(TransactionError::AlreadyImported)); return Err(Error::Transaction(TransactionError::AlreadyImported));
} }
match self.engine.verify_transaction_basic(&tx, &best_block_header) match self.engine.verify_transaction_basic(&tx, &best_block_header)
.and_then(|_| self.engine.verify_transaction(tx, &best_block_header)) .and_then(|_| self.engine.verify_transaction_unordered(tx, &best_block_header))
{ {
Err(e) => { Err(e) => {
debug!(target: "miner", "Rejected tx {:?} with invalid signature: {:?}", hash, e); debug!(target: "miner", "Rejected tx {:?} with invalid signature: {:?}", hash, e);

View File

@ -171,7 +171,7 @@ impl StratumJobDispatcher {
fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String { fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String {
// TODO: move this to engine // TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty); let target = Ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().get_seedhash(number); let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]); let seed_hash = H256::from_slice(&seed_hash[..]);
format!( format!(
r#"["0x", "0x{}","0x{}","0x{}","0x{:x}"]"#, r#"["0x", "0x{}","0x{}","0x{}","0x{:x}"]"#,

View File

@ -72,7 +72,7 @@ impl NotifyWork for WorkPoster {
fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) {
// TODO: move this to engine // TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty); let target = Ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().get_seedhash(number); let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]); let seed_hash = H256::from_slice(&seed_hash[..]);
let body = format!( let body = format!(
r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#, r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,

View File

@ -25,7 +25,8 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use blockchain::{BlockChain, BlockProvider}; use blockchain::{BlockChain, BlockProvider};
use engines::{Engine, EpochVerifier, EpochTransition}; use engines::{EthEngine, EpochVerifier, EpochTransition};
use machine::EthereumMachine;
use ids::BlockId; use ids::BlockId;
use header::Header; use header::Header;
use receipt::Receipt; use receipt::Receipt;
@ -168,7 +169,7 @@ struct ChunkRebuilder {
// and epoch data from last blocks in chunks. // and epoch data from last blocks in chunks.
// verification for these will be done at the end. // verification for these will be done at the end.
unverified_firsts: Vec<(Header, Bytes, H256)>, unverified_firsts: Vec<(Header, Bytes, H256)>,
last_epochs: Vec<(Header, Box<EpochVerifier>)>, last_epochs: Vec<(Header, Box<EpochVerifier<EthereumMachine>>)>,
} }
// verified data. // verified data.
@ -180,9 +181,9 @@ struct Verified {
impl ChunkRebuilder { impl ChunkRebuilder {
fn verify_transition( fn verify_transition(
&mut self, &mut self,
last_verifier: &mut Option<Box<EpochVerifier>>, last_verifier: &mut Option<Box<EpochVerifier<EthereumMachine>>>,
transition_rlp: UntrustedRlp, transition_rlp: UntrustedRlp,
engine: &Engine, engine: &EthEngine,
) -> Result<Verified, ::error::Error> { ) -> Result<Verified, ::error::Error> {
use engines::ConstructedVerifier; use engines::ConstructedVerifier;
@ -238,7 +239,7 @@ impl Rebuilder for ChunkRebuilder {
fn feed( fn feed(
&mut self, &mut self,
chunk: &[u8], chunk: &[u8],
engine: &Engine, engine: &EthEngine,
abort_flag: &AtomicBool, abort_flag: &AtomicBool,
) -> Result<(), ::error::Error> { ) -> Result<(), ::error::Error> {
let rlp = UntrustedRlp::new(chunk); let rlp = UntrustedRlp::new(chunk);
@ -346,7 +347,7 @@ impl Rebuilder for ChunkRebuilder {
Ok(()) Ok(())
} }
fn finalize(&mut self, _engine: &Engine) -> Result<(), ::error::Error> { fn finalize(&mut self, _engine: &EthEngine) -> Result<(), ::error::Error> {
if !self.had_genesis { if !self.had_genesis {
return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into());
} }

View File

@ -21,7 +21,7 @@ use std::sync::atomic::AtomicBool;
use std::sync::Arc; use std::sync::Arc;
use blockchain::BlockChain; use blockchain::BlockChain;
use engines::Engine; use engines::EthEngine;
use snapshot::{Error, ManifestData}; use snapshot::{Error, ManifestData};
use bigint::hash::H256; use bigint::hash::H256;
@ -84,7 +84,7 @@ pub trait Rebuilder: Send {
fn feed( fn feed(
&mut self, &mut self,
chunk: &[u8], chunk: &[u8],
engine: &Engine, engine: &EthEngine,
abort_flag: &AtomicBool, abort_flag: &AtomicBool,
) -> Result<(), ::error::Error>; ) -> Result<(), ::error::Error>;
@ -93,5 +93,5 @@ pub trait Rebuilder: Send {
/// ///
/// This should apply the necessary "glue" between chunks, /// This should apply the necessary "glue" between chunks,
/// and verify against the restored state. /// and verify against the restored state.
fn finalize(&mut self, engine: &Engine) -> Result<(), ::error::Error>; fn finalize(&mut self, engine: &EthEngine) -> Result<(), ::error::Error>;
} }

View File

@ -27,7 +27,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use blockchain::{BlockChain, BlockProvider}; use blockchain::{BlockChain, BlockProvider};
use engines::Engine; use engines::EthEngine;
use snapshot::{Error, ManifestData}; use snapshot::{Error, ManifestData};
use snapshot::block::AbridgedBlock; use snapshot::block::AbridgedBlock;
use bigint::hash::H256; use bigint::hash::H256;
@ -219,7 +219,7 @@ impl PowRebuilder {
impl Rebuilder for PowRebuilder { impl Rebuilder for PowRebuilder {
/// Feed the rebuilder an uncompressed block chunk. /// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors. /// Returns the number of blocks fed or any errors.
fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> { fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
use basic_types::Seal::With; use basic_types::Seal::With;
use views::BlockView; use views::BlockView;
use snapshot::verify_old_block; use snapshot::verify_old_block;
@ -271,7 +271,6 @@ impl Rebuilder for PowRebuilder {
&block.header, &block.header,
engine, engine,
&self.chain, &self.chain,
Some(&block_bytes),
is_best is_best
)?; )?;
@ -298,7 +297,7 @@ impl Rebuilder for PowRebuilder {
} }
/// Glue together any disconnected chunks and check that the chain is complete. /// Glue together any disconnected chunks and check that the chain is complete.
fn finalize(&mut self, _: &Engine) -> Result<(), ::error::Error> { fn finalize(&mut self, _: &EthEngine) -> Result<(), ::error::Error> {
let mut batch = self.db.transaction(); let mut batch = self.db.transaction();
for (first_num, first_hash) in self.disconnected.drain(..) { for (first_num, first_hash) in self.disconnected.drain(..) {

View File

@ -26,7 +26,7 @@ use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY};
use account_db::{AccountDB, AccountDBMut}; use account_db::{AccountDB, AccountDBMut};
use blockchain::{BlockChain, BlockProvider}; use blockchain::{BlockChain, BlockProvider};
use engines::Engine; use engines::EthEngine;
use header::Header; use header::Header;
use ids::BlockId; use ids::BlockId;
@ -126,7 +126,7 @@ impl Progress {
} }
/// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
pub fn take_snapshot<W: SnapshotWriter + Send>( pub fn take_snapshot<W: SnapshotWriter + Send>(
engine: &Engine, engine: &EthEngine,
chain: &BlockChain, chain: &BlockChain,
block_at: H256, block_at: H256,
state_db: &HashDB, state_db: &HashDB,
@ -484,13 +484,13 @@ const POW_VERIFY_RATE: f32 = 0.02;
/// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform
/// the fullest verification possible. If not, it will take a random sample to determine whether it will /// the fullest verification possible. If not, it will take a random sample to determine whether it will
/// do heavy or light verification. /// do heavy or light verification.
pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> { pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> {
engine.verify_block_basic(header, body)?; engine.verify_block_basic(header)?;
if always || rng.gen::<f32>() <= POW_VERIFY_RATE { if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
engine.verify_block_unordered(header, body)?; engine.verify_block_unordered(header)?;
match chain.block_header(header.parent_hash()) { match chain.block_header(header.parent_hash()) {
Some(parent) => engine.verify_block_family(header, &parent, body), Some(parent) => engine.verify_block_family(header, &parent),
None => Ok(()), None => Ok(()),
} }
} else { } else {

View File

@ -28,7 +28,7 @@ use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};
use blockchain::BlockChain; use blockchain::BlockChain;
use client::{BlockChainClient, Client}; use client::{BlockChainClient, Client};
use engines::Engine; use engines::EthEngine;
use error::Error; use error::Error;
use ids::BlockId; use ids::BlockId;
use service::ClientIoMessage; use service::ClientIoMessage;
@ -91,7 +91,7 @@ struct RestorationParams<'a> {
writer: Option<LooseWriter>, // writer for recovered snapshot. writer: Option<LooseWriter>, // writer for recovered snapshot.
genesis: &'a [u8], // genesis block of the chain. genesis: &'a [u8], // genesis block of the chain.
guard: Guard, // guard for the restoration directory. guard: Guard, // guard for the restoration directory.
engine: &'a Engine, engine: &'a EthEngine,
} }
impl Restoration { impl Restoration {
@ -145,7 +145,7 @@ impl Restoration {
} }
// feeds a block chunk // feeds a block chunk
fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine, flag: &AtomicBool) -> Result<(), Error> { fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> {
if self.block_chunks_left.contains(&hash) { if self.block_chunks_left.contains(&hash) {
let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?;
@ -161,7 +161,7 @@ impl Restoration {
} }
// finish up restoration. // finish up restoration.
fn finalize(mut self, engine: &Engine) -> Result<(), Error> { fn finalize(mut self, engine: &EthEngine) -> Result<(), Error> {
use trie::TrieError; use trie::TrieError;
if !self.is_done() { return Ok(()) } if !self.is_done() { return Ok(()) }
@ -199,7 +199,7 @@ pub type Channel = IoChannel<ClientIoMessage>;
/// Snapshot service parameters. /// Snapshot service parameters.
pub struct ServiceParams { pub struct ServiceParams {
/// The consensus engine this is built on. /// The consensus engine this is built on.
pub engine: Arc<Engine>, pub engine: Arc<EthEngine>,
/// The chain's genesis block. /// The chain's genesis block.
pub genesis_block: Bytes, pub genesis_block: Bytes,
/// Database configuration options. /// Database configuration options.
@ -225,7 +225,7 @@ pub struct Service {
pruning: Algorithm, pruning: Algorithm,
status: Mutex<RestorationStatus>, status: Mutex<RestorationStatus>,
reader: RwLock<Option<LooseReader>>, reader: RwLock<Option<LooseReader>>,
engine: Arc<Engine>, engine: Arc<EthEngine>,
genesis_block: Bytes, genesis_block: Bytes,
state_chunks: AtomicUsize, state_chunks: AtomicUsize,
block_chunks: AtomicUsize, block_chunks: AtomicUsize,

View File

@ -24,7 +24,7 @@ use account_db::AccountDBMut;
use basic_account::BasicAccount; use basic_account::BasicAccount;
use blockchain::BlockChain; use blockchain::BlockChain;
use client::{BlockChainClient, Client}; use client::{BlockChainClient, Client};
use engines::Engine; use engines::EthEngine;
use snapshot::{StateRebuilder}; use snapshot::{StateRebuilder};
use snapshot::io::{SnapshotReader, PackedWriter, PackedReader}; use snapshot::io::{SnapshotReader, PackedWriter, PackedReader};
@ -160,7 +160,7 @@ pub fn snap(client: &Client) -> GuardedTempResult<Box<SnapshotReader>> {
/// write into the given database. /// write into the given database.
pub fn restore( pub fn restore(
db: Arc<KeyValueDB>, db: Arc<KeyValueDB>,
engine: &Engine, engine: &EthEngine,
reader: &SnapshotReader, reader: &SnapshotReader,
genesis: &[u8], genesis: &[u8],
) -> Result<(), ::error::Error> { ) -> Result<(), ::error::Error> {

View File

@ -38,7 +38,7 @@ fn chunk_and_restore(amount: u64) {
let mut finalizer = BlockFinalizer::default(); let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap(); let genesis = canon_chain.generate(&mut finalizer).unwrap();
let engine = Arc::new(::engines::NullEngine::default()); let engine = ::spec::Spec::new_test().engine;
let new_path = RandomTempPath::create_dir(); let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned(); let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP"); snapshot_path.push("SNAP");
@ -128,7 +128,7 @@ fn checks_flag() {
let chunk = stream.out(); let chunk = stream.out();
let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0))); let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let engine = Arc::new(::engines::NullEngine::default()); let engine = ::spec::Spec::new_test().engine;
let chain = BlockChain::new(Default::default(), &genesis, db.clone()); let chain = BlockChain::new(Default::default(), &genesis, db.clone());
let manifest = ::snapshot::ManifestData { let manifest = ::snapshot::ManifestData {

View File

@ -20,30 +20,39 @@ use std::io::Read;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::path::Path; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use rustc_hex::FromHex;
use bigint::hash::{H256, H2048};
use bigint::prelude::U256;
use bytes::Bytes;
use ethjson;
use hash::{KECCAK_NULL_RLP, keccak}; use hash::{KECCAK_NULL_RLP, keccak};
use parking_lot::RwLock;
use rlp::{Rlp, RlpStream};
use rustc_hex::FromHex;
use util::*;
use vm::{EnvInfo, CallType, ActionValue, ActionParams};
use super::genesis::Genesis; use super::genesis::Genesis;
use super::seal::Generic as GenericSeal; use super::seal::Generic as GenericSeal;
use builtin::Builtin; use builtin::Builtin;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint, DEFAULT_BLOCKHASH_CONTRACT}; use engines::{EthEngine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint, DEFAULT_BLOCKHASH_CONTRACT};
use vm::{EnvInfo, CallType, ActionValue, ActionParams};
use error::Error; use error::Error;
use ethereum;
use ethjson;
use executive::Executive; use executive::Executive;
use factory::Factories; use factory::Factories;
use header::{BlockNumber, Header}; use header::{BlockNumber, Header};
use machine::EthereumMachine;
use pod_state::*; use pod_state::*;
use rlp::{Rlp, RlpStream};
use state::{Backend, State, Substate}; use state::{Backend, State, Substate};
use state::backend::Basic as BasicBackend; use state::backend::Basic as BasicBackend;
use trace::{NoopTracer, NoopVMTracer}; use trace::{NoopTracer, NoopVMTracer};
use bigint::prelude::U256;
use bigint::hash::{H256, H2048}; pub use ethash::OptimizeFor;
use parking_lot::RwLock;
use util::*; // helper for formatting errors.
use bytes::Bytes; fn fmt_err<F: ::std::fmt::Display>(f: F) -> String {
format!("Spec json is invalid: {}", f)
}
/// Parameters common to ethereum-like blockchains. /// Parameters common to ethereum-like blockchains.
/// NOTE: when adding bugfix hard-fork parameters, /// NOTE: when adding bugfix hard-fork parameters,
@ -88,7 +97,8 @@ pub struct CommonParams {
pub eip210_contract_code: Bytes, pub eip210_contract_code: Bytes,
/// Gas allocated for EIP-210 blockhash update. /// Gas allocated for EIP-210 blockhash update.
pub eip210_contract_gas: U256, pub eip210_contract_gas: U256,
/// Number of first block where EIP-211 (Metropolis: RETURNDATASIZE/RETURNDATACOPY) rules begin. /// Number of first block where EIP-211 (Metropolis: RETURNDATASIZE/RETURNDATACOPY) rules
/// begin.
pub eip211_transition: BlockNumber, pub eip211_transition: BlockNumber,
/// Number of first block where EIP-214 rules begin. /// Number of first block where EIP-214 rules begin.
pub eip214_transition: BlockNumber, pub eip214_transition: BlockNumber,
@ -102,12 +112,12 @@ pub struct CommonParams {
pub wasm: bool, pub wasm: bool,
/// Gas limit bound divisor (how much gas limit can change per block) /// Gas limit bound divisor (how much gas limit can change per block)
pub gas_limit_bound_divisor: U256, pub gas_limit_bound_divisor: U256,
/// Block reward in wei.
pub block_reward: U256,
/// Registrar contract address. /// Registrar contract address.
pub registrar: Address, pub registrar: Address,
/// Node permission managing contract address. /// Node permission managing contract address.
pub node_permission_contract: Option<Address>, pub node_permission_contract: Option<Address>,
/// Maximum contract code size that can be deployed.
pub max_code_size: u64,
/// Transaction permission managing contract address. /// Transaction permission managing contract address.
pub transaction_permission_contract: Option<Address>, pub transaction_permission_contract: Option<Address>,
} }
@ -115,7 +125,7 @@ pub struct CommonParams {
impl CommonParams { impl CommonParams {
/// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net. /// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net.
pub fn schedule(&self, block_number: u64) -> ::vm::Schedule { pub fn schedule(&self, block_number: u64) -> ::vm::Schedule {
let mut schedule = ::vm::Schedule::new_post_eip150(usize::max_value(), true, true, true); let mut schedule = ::vm::Schedule::new_post_eip150(self.max_code_size as _, true, true, true);
self.update_schedule(block_number, &mut schedule); self.update_schedule(block_number, &mut schedule);
schedule schedule
} }
@ -139,14 +149,10 @@ impl CommonParams {
/// Whether these params contain any bug-fix hard forks. /// Whether these params contain any bug-fix hard forks.
pub fn contains_bugfix_hard_fork(&self) -> bool { pub fn contains_bugfix_hard_fork(&self) -> bool {
self.eip98_transition != 0 && self.eip98_transition != 0 && self.eip155_transition != 0 &&
self.eip155_transition != 0 && self.validate_receipts_transition != 0 && self.eip86_transition != 0 &&
self.validate_receipts_transition != 0 && self.eip140_transition != 0 && self.eip210_transition != 0 &&
self.eip86_transition != 0 && self.eip211_transition != 0 && self.eip214_transition != 0 &&
self.eip140_transition != 0 &&
self.eip210_transition != 0 &&
self.eip211_transition != 0 &&
self.eip214_transition != 0 &&
self.dust_protection_transition != 0 self.dust_protection_transition != 0
} }
} }
@ -157,44 +163,116 @@ impl From<ethjson::spec::Params> for CommonParams {
account_start_nonce: p.account_start_nonce.map_or_else(U256::zero, Into::into), account_start_nonce: p.account_start_nonce.map_or_else(U256::zero, Into::into),
maximum_extra_data_size: p.maximum_extra_data_size.into(), maximum_extra_data_size: p.maximum_extra_data_size.into(),
network_id: p.network_id.into(), network_id: p.network_id.into(),
chain_id: if let Some(n) = p.chain_id { n.into() } else { p.network_id.into() }, chain_id: if let Some(n) = p.chain_id {
n.into()
} else {
p.network_id.into()
},
subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()), subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()),
min_gas_limit: p.min_gas_limit.into(), min_gas_limit: p.min_gas_limit.into(),
fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None }, fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) {
Some((n.into(), h.into()))
} else {
None
},
eip98_transition: p.eip98_transition.map_or(0, Into::into), eip98_transition: p.eip98_transition.map_or(0, Into::into),
eip155_transition: p.eip155_transition.map_or(0, Into::into), eip155_transition: p.eip155_transition.map_or(0, Into::into),
validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into), validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into),
eip86_transition: p.eip86_transition.map_or(BlockNumber::max_value(), Into::into), eip86_transition: p.eip86_transition.map_or(
eip140_transition: p.eip140_transition.map_or(BlockNumber::max_value(), Into::into), BlockNumber::max_value(),
eip210_transition: p.eip210_transition.map_or(BlockNumber::max_value(), Into::into), Into::into,
),
eip140_transition: p.eip140_transition.map_or(
BlockNumber::max_value(),
Into::into,
),
eip210_transition: p.eip210_transition.map_or(
BlockNumber::max_value(),
Into::into,
),
eip210_contract_address: p.eip210_contract_address.map_or(0xf0.into(), Into::into), eip210_contract_address: p.eip210_contract_address.map_or(0xf0.into(), Into::into),
eip210_contract_code: p.eip210_contract_code.map_or_else( eip210_contract_code: p.eip210_contract_code.map_or_else(
|| DEFAULT_BLOCKHASH_CONTRACT.from_hex().expect("Default BLOCKHASH contract is valid"), || {
Into::into), DEFAULT_BLOCKHASH_CONTRACT.from_hex().expect(
"Default BLOCKHASH contract is valid",
)
},
Into::into,
),
eip210_contract_gas: p.eip210_contract_gas.map_or(1000000.into(), Into::into), eip210_contract_gas: p.eip210_contract_gas.map_or(1000000.into(), Into::into),
eip211_transition: p.eip211_transition.map_or(BlockNumber::max_value(), Into::into), eip211_transition: p.eip211_transition.map_or(
eip214_transition: p.eip214_transition.map_or(BlockNumber::max_value(), Into::into), BlockNumber::max_value(),
eip658_transition: p.eip658_transition.map_or(BlockNumber::max_value(), Into::into), Into::into,
dust_protection_transition: p.dust_protection_transition.map_or(BlockNumber::max_value(), Into::into), ),
eip214_transition: p.eip214_transition.map_or(
BlockNumber::max_value(),
Into::into,
),
eip658_transition: p.eip658_transition.map_or(
BlockNumber::max_value(),
Into::into,
),
dust_protection_transition: p.dust_protection_transition.map_or(
BlockNumber::max_value(),
Into::into,
),
nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into), nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into),
remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false), remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false),
wasm: p.wasm.unwrap_or(false), wasm: p.wasm.unwrap_or(false),
gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
block_reward: p.block_reward.map_or_else(U256::zero, Into::into),
registrar: p.registrar.map_or_else(Address::new, Into::into), registrar: p.registrar.map_or_else(Address::new, Into::into),
node_permission_contract: p.node_permission_contract.map(Into::into), node_permission_contract: p.node_permission_contract.map(Into::into),
max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into),
transaction_permission_contract: p.transaction_permission_contract.map(Into::into), transaction_permission_contract: p.transaction_permission_contract.map(Into::into),
} }
} }
} }
/// Runtime parameters for the spec that are related to how the software should run the chain,
/// rather than integral properties of the chain itself.
#[derive(Debug, Clone, Copy)]
pub struct SpecParams<'a> {
/// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems.
pub cache_dir: &'a Path,
/// Whether to run slower at the expense of better memory usage, or run faster while using
/// more memory. This may get more fine-grained in the future but for now is simply a
/// binary option.
pub optimization_setting: Option<OptimizeFor>,
}
impl<'a> SpecParams<'a> {
/// Create from a cache path, with null values for the other fields
pub fn from_path(path: &'a Path) -> Self {
SpecParams {
cache_dir: path,
optimization_setting: None,
}
}
/// Create from a cache path and an optimization setting
pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self {
SpecParams {
cache_dir: path,
optimization_setting: Some(optimization),
}
}
}
impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> {
fn from(path: &'a T) -> Self {
Self::from_path(path.as_ref())
}
}
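// Usage sketch added for clarity (not part of this diff): both constructors borrow the
// cache directory; only `new` pins an optimization preference, while `from_path` leaves
// it unset so the engine falls back to its default.
#[cfg(test)]
#[allow(dead_code)]
fn spec_params_example<'a>(cache_dir: &'a Path) -> (SpecParams<'a>, SpecParams<'a>) {
	let defaults = SpecParams::from_path(cache_dir);
	let low_memory = SpecParams::new(cache_dir, OptimizeFor::Memory);
	(defaults, low_memory)
}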
/// Parameters for a block chain; includes both those intrinsic to the design of the /// Parameters for a block chain; includes both those intrinsic to the design of the
/// chain and those to be interpreted by the active chain engine. /// chain and those to be interpreted by the active chain engine.
pub struct Spec { pub struct Spec {
/// User friendly spec name /// User friendly spec name
pub name: String, pub name: String,
/// What engine are we using for this? /// What engine are we using for this?
pub engine: Arc<Engine>, pub engine: Arc<EthEngine>,
/// Name of the subdir inside the main data dir to use for chain data and settings. /// Name of the subdir inside the main data dir to use for chain data and settings.
pub data_dir: String, pub data_dir: String,
@ -257,16 +335,27 @@ impl Clone for Spec {
} }
} }
/// Load from JSON object. fn load_machine_from(s: ethjson::spec::Spec) -> EthereumMachine {
pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result<Spec, Error> {
let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
let params = CommonParams::from(s.params);
Spec::machine(&s.engine, params, builtins)
}
/// Load from JSON object.
fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> {
let builtins = s.accounts
.builtins()
.into_iter()
.map(|p| (p.0.into(), From::from(p.1)))
.collect();
let g = Genesis::from(s.genesis); let g = Genesis::from(s.genesis);
let GenericSeal(seal_rlp) = g.seal.into(); let GenericSeal(seal_rlp) = g.seal.into();
let params = CommonParams::from(s.params); let params = CommonParams::from(s.params);
let mut s = Spec { let mut s = Spec {
name: s.name.clone().into(), name: s.name.clone().into(),
engine: Spec::engine(cache_dir, s.engine, params, builtins), engine: Spec::engine(spec_params, s.engine, params, builtins),
data_dir: s.data_dir.unwrap_or(s.name).into(), data_dir: s.data_dir.unwrap_or(s.name).into(),
nodes: s.nodes.unwrap_or_else(Vec::new), nodes: s.nodes.unwrap_or_else(Vec::new),
parent_hash: g.parent_hash, parent_hash: g.parent_hash,
@ -279,7 +368,11 @@ pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result
timestamp: g.timestamp, timestamp: g.timestamp,
extra_data: g.extra_data, extra_data: g.extra_data,
seal_rlp: seal_rlp, seal_rlp: seal_rlp,
constructors: s.accounts.constructors().into_iter().map(|(a, c)| (a.into(), c.into())).collect(), constructors: s.accounts
.constructors()
.into_iter()
.map(|(a, c)| (a.into(), c.into()))
.collect(),
state_root_memo: RwLock::new(Default::default()), // will be overwritten right after. state_root_memo: RwLock::new(Default::default()), // will be overwritten right after.
genesis_state: s.accounts.into(), genesis_state: s.accounts.into(),
}; };
@ -287,7 +380,12 @@ pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result
// use memoized state root if provided. // use memoized state root if provided.
match g.state_root { match g.state_root {
Some(root) => *s.state_root_memo.get_mut() = root, Some(root) => *s.state_root_memo.get_mut() = root,
None => { let _ = s.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?; }, None => {
let _ = s.run_constructors(
&Default::default(),
BasicBackend(MemoryDB::new()),
)?;
}
} }
Ok(s) Ok(s)
@ -302,26 +400,52 @@ macro_rules! load_bundled {
}; };
} }
macro_rules! load_machine_bundled {
($e:expr) => {
Spec::load_machine(
include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]
).expect(concat!("Chain spec ", $e, " is invalid."))
};
}
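// Orientation note added for clarity (not part of this diff): `load_bundled!` builds a
// full `Spec`, while `load_machine_bundled!` yields only the `EthereumMachine`, with no
// consensus engine attached; `new_test` and `new_test_machine` below wrap the same
// bundled "null_morden" JSON.
#[cfg(test)]
#[allow(dead_code)]
fn bundled_loading_example() -> (Spec, EthereumMachine) {
	(Spec::new_test(), Spec::new_test_machine())
}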
impl Spec { impl Spec {
/// Convert engine spec into a arc'd Engine of the right underlying type. // create an instance of an Ethereum state machine, minus consensus logic.
/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. fn machine(
fn engine<T: AsRef<Path>>( engine_spec: &ethjson::spec::Engine,
cache_dir: T,
engine_spec: ethjson::spec::Engine,
params: CommonParams, params: CommonParams,
builtins: BTreeMap<Address, Builtin>, builtins: BTreeMap<Address, Builtin>,
) -> Arc<Engine> { ) -> EthereumMachine {
match engine_spec { if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec {
ethjson::spec::Engine::Null => Arc::new(NullEngine::new(params, builtins)), EthereumMachine::with_ethash_extensions(params, builtins, ethash.params.clone().into())
ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(params, builtins)), } else {
ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(cache_dir, params, From::from(ethash.params), builtins)), EthereumMachine::regular(params, builtins)
ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."),
ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."),
} }
} }
// given a pre-constructor state, run all the given constructors and produce a new state and state root. /// Convert engine spec into a arc'd Engine of the right underlying type.
/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
fn engine(
spec_params: SpecParams,
engine_spec: ethjson::spec::Engine,
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
) -> Arc<EthEngine> {
let machine = Self::machine(&engine_spec, params, builtins);
match engine_spec {
ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)),
ethjson::spec::Engine::Ethash(ethash) => Arc::new(::ethereum::Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)),
ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(machine)),
ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)),
ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine)
.expect("Failed to start AuthorityRound consensus engine."),
ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(tendermint.params.into(), machine)
.expect("Failed to start the Tendermint consensus engine."),
}
}
// given a pre-constructor state, run all the given constructors and produce a new state and
// state root.
fn run_constructors<T: Backend>(&self, factories: &Factories, mut db: T) -> Result<T, Error> { fn run_constructors<T: Backend>(&self, factories: &Factories, mut db: T) -> Result<T, Error> {
let mut root = KECCAK_NULL_RLP; let mut root = KECCAK_NULL_RLP;
@ -337,20 +461,18 @@ impl Spec {
for (address, account) in self.genesis_state.get().iter() { for (address, account) in self.genesis_state.get().iter() {
db.note_non_null_account(address); db.note_non_null_account(address);
account.insert_additional( account.insert_additional(
&mut *factories.accountdb.create(db.as_hashdb_mut(), keccak(address)), &mut *factories.accountdb.create(
&factories.trie db.as_hashdb_mut(),
keccak(address),
),
&factories.trie,
); );
} }
let start_nonce = self.engine.account_start_nonce(0); let start_nonce = self.engine.account_start_nonce(0);
let (root, db) = { let (root, db) = {
let mut state = State::from_existing( let mut state = State::from_existing(db, root, start_nonce, factories.clone())?;
db,
root,
start_nonce,
factories.clone(),
)?;
// Execute contract constructors. // Execute contract constructors.
let env_info = EnvInfo { let env_info = EnvInfo {
@ -384,7 +506,7 @@ impl Spec {
let mut substate = Substate::new(); let mut substate = Substate::new();
{ {
let mut exec = Executive::new(&mut state, &env_info, self.engine.as_ref()); let mut exec = Executive::new(&mut state, &env_info, self.engine.machine());
if let Err(e) = exec.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) { if let Err(e) = exec.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) {
warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e);
} }
@ -410,22 +532,34 @@ impl Spec {
} }
/// Get common blockchain parameters. /// Get common blockchain parameters.
pub fn params(&self) -> &CommonParams { &self.engine.params() } pub fn params(&self) -> &CommonParams {
&self.engine.params()
}
/// Get the known nodes of the network in enode format. /// Get the known nodes of the network in enode format.
pub fn nodes(&self) -> &[String] { &self.nodes } pub fn nodes(&self) -> &[String] {
&self.nodes
}
/// Get the configured Network ID. /// Get the configured Network ID.
pub fn network_id(&self) -> u64 { self.params().network_id } pub fn network_id(&self) -> u64 {
self.params().network_id
}
/// Get the chain ID used for signing. /// Get the chain ID used for signing.
pub fn chain_id(&self) -> u64 { self.params().chain_id } pub fn chain_id(&self) -> u64 {
self.params().chain_id
}
/// Get the configured subprotocol name. /// Get the configured subprotocol name.
pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() } pub fn subprotocol_name(&self) -> String {
self.params().subprotocol_name.clone()
}
/// Get the configured network fork block. /// Get the configured network fork block.
pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block } pub fn fork_block(&self) -> Option<(BlockNumber, H256)> {
self.params().fork_block
}
/// Get the header of the genesis block. /// Get the header of the genesis block.
pub fn genesis_header(&self) -> Header { pub fn genesis_header(&self) -> Header {
@ -480,7 +614,10 @@ impl Spec {
/// Alter the value of the genesis state. /// Alter the value of the genesis state.
pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> {
self.genesis_state = s; self.genesis_state = s;
let _ = self.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?; let _ = self.run_constructors(
&Default::default(),
BasicBackend(MemoryDB::new()),
)?;
Ok(()) Ok(())
} }
@ -496,7 +633,7 @@ impl Spec {
/// Ensure that the given state DB has the trie nodes in for the genesis state. /// Ensure that the given state DB has the trie nodes in for the genesis state.
pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> { pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> {
if db.as_hashdb().contains(&self.state_root()) { if db.as_hashdb().contains(&self.state_root()) {
return Ok(db) return Ok(db);
} }
// TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever
@ -505,15 +642,25 @@ impl Spec {
Ok(db) Ok(db)
} }
/// Loads spec from json file. Provide factories for executing contracts and ensuring /// Loads just the state machine from a json file.
/// storage goes to the right place. pub fn load_machine<R: Read>(reader: R) -> Result<EthereumMachine, String> {
pub fn load<T: AsRef<Path>, R>(cache_dir: T, reader: R) -> Result<Self, String> where R: Read { ethjson::spec::Spec::load(reader)
fn fmt<F: ::std::fmt::Display>(f: F) -> String { .map_err(fmt_err)
format!("Spec json is invalid: {}", f) .map(load_machine_from)
} }
ethjson::spec::Spec::load(reader).map_err(fmt) /// Loads spec from json file. Provide factories for executing contracts and ensuring
.and_then(|x| load_from(cache_dir, x).map_err(fmt)) /// storage goes to the right place.
pub fn load<'a, T: Into<SpecParams<'a>>, R>(params: T, reader: R) -> Result<Self, String>
where
R: Read,
{
ethjson::spec::Spec::load(reader).map_err(fmt_err).and_then(
|x| {
load_from(params.into(), x).map_err(fmt_err)
},
)
} }
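// Call-site sketch added for clarity (not part of this diff): the `Into<SpecParams>`
// bound keeps existing callers that pass a bare cache path working, while new callers
// may pass an explicit `SpecParams` to request `OptimizeFor::Memory`.
#[cfg(test)]
#[allow(dead_code)]
fn load_spec_example(json: &[u8]) -> Result<Spec, String> {
	Spec::load(&::std::env::temp_dir(), json)
}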
/// initialize genesis epoch data, using in-memory database for /// initialize genesis epoch data, using in-memory database for
@ -543,7 +690,7 @@ impl Spec {
difficulty: *genesis.difficulty(), difficulty: *genesis.difficulty(),
gas_limit: *genesis.gas_limit(), gas_limit: *genesis.gas_limit(),
last_hashes: Arc::new(Vec::new()), last_hashes: Arc::new(Vec::new()),
gas_used: 0.into() gas_used: 0.into(),
}; };
let from = Address::default(); let from = Address::default();
@ -560,92 +707,145 @@ impl Spec {
db.as_hashdb_mut(), db.as_hashdb_mut(),
*genesis.state_root(), *genesis.state_root(),
&tx, &tx,
&*self.engine, self.engine.machine(),
&env_info, &env_info,
factories.clone(), factories.clone(),
true, true,
); );
res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect())) res.map(|(out, proof)| {
.ok_or_else(|| "Failed to prove call: insufficient state".into()) (out, proof.into_iter().map(|x| x.into_vec()).collect())
}).ok_or_else(|| "Failed to prove call: insufficient state".into())
}; };
self.engine.genesis_epoch_data(&genesis, &call) self.engine.genesis_epoch_data(&genesis, &call)
} }
/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus. /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a
pub fn new_test() -> Spec { load_bundled!("null_morden") } /// NullEngine consensus.
pub fn new_test() -> Spec {
load_bundled!("null_morden")
}
/// Create the EthereumMachine corresponding to Spec::new_test.
pub fn new_test_machine() -> EthereumMachine { load_machine_bundled!("null_morden") }
/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus that applies the block reward on block close. /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus that applies the block reward on block close.
pub fn new_test_with_reward() -> Spec { load_bundled!("null_morden_with_reward") } pub fn new_test_with_reward() -> Spec { load_bundled!("null_morden_with_reward") }
/// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is keccak(''). /// Create a new Spec which is a NullEngine consensus with a premine of address whose
pub fn new_null() -> Spec { load_bundled!("null") } /// secret is keccak('').
pub fn new_null() -> Spec {
load_bundled!("null")
}
/// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1. /// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1.
pub fn new_test_constructor() -> Spec { load_bundled!("constructor") } pub fn new_test_constructor() -> Spec {
load_bundled!("constructor")
}
/// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring work). /// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring
pub fn new_instant() -> Spec { load_bundled!("instant_seal") } /// work).
pub fn new_instant() -> Spec {
load_bundled!("instant_seal")
}
/// Create a new Spec with AuthorityRound consensus which does internal sealing (not requiring work). /// Create a new Spec with AuthorityRound consensus which does internal sealing (not
/// requiring work).
/// Accounts with secrets keccak("0") and keccak("1") are the validators. /// Accounts with secrets keccak("0") and keccak("1") are the validators.
pub fn new_test_round() -> Self { load_bundled!("authority_round") } pub fn new_test_round() -> Self {
load_bundled!("authority_round")
}
/// Create a new Spec with Tendermint consensus which does internal sealing (not requiring work). /// Create a new Spec with Tendermint consensus which does internal sealing (not requiring
/// work).
/// Accounts with secrets keccak("0") and keccak("1") are the authorities. /// Accounts with secrets keccak("0") and keccak("1") are the authorities.
pub fn new_test_tendermint() -> Self { load_bundled!("tendermint") } pub fn new_test_tendermint() -> Self {
load_bundled!("tendermint")
}
/// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files /// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files
/// Accounts with secrets keccak("0") and keccak("1") are initially the validators. /// Accounts with secrets keccak("0") and keccak("1") are initially the validators.
/// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine the current validators using `getValidators`. /// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine
/// Second validator can be removed with "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added back in using "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1". /// the current validators using `getValidators`.
pub fn new_validator_safe_contract() -> Self { load_bundled!("validator_safe_contract") } /// Second validator can be removed with
/// "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added
/// back in using
/// "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".
pub fn new_validator_safe_contract() -> Self {
load_bundled!("validator_safe_contract")
}
/// The same as the `safeContract`, but allows reporting and uses AuthorityRound. /// The same as the `safeContract`, but allows reporting and uses AuthorityRound.
/// An account marked with `reportBenign` can be checked as disliked with "0xd8f2e0bf". /// An account marked with `reportBenign` can be checked as disliked with "0xd8f2e0bf".
/// Validator can be removed with `reportMalicious`. /// Validator can be removed with `reportMalicious`.
pub fn new_validator_contract() -> Self { load_bundled!("validator_contract") } pub fn new_validator_contract() -> Self {
load_bundled!("validator_contract")
}
/// Create a new Spec with BasicAuthority which uses multiple validator sets changing with height. /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with
/// Account with secrets keccak("0") is the validator for block 1 and with keccak("1") onwards. /// height.
pub fn new_validator_multi() -> Self { load_bundled!("validator_multi") } /// Account with secrets keccak("0") is the validator for block 1 and with keccak("1")
/// onwards.
pub fn new_validator_multi() -> Self {
load_bundled!("validator_multi")
}
/// Create a new spec for a PoW chain /// Create a new spec for a PoW chain
pub fn new_pow_test_spec() -> Self { load_bundled!("ethereum/olympic") } pub fn new_pow_test_spec() -> Self {
load_bundled!("ethereum/olympic")
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::str::FromStr;
use util::*;
use views::*;
use tests::helpers::get_temp_state_db;
use state::State;
use super::*; use super::*;
use state::State;
use std::str::FromStr;
use tests::helpers::get_temp_state_db;
use views::*;
// https://github.com/paritytech/parity/issues/1840 // https://github.com/paritytech/parity/issues/1840
#[test] #[test]
fn test_load_empty() { fn test_load_empty() {
assert!(Spec::load(::std::env::temp_dir(), &[] as &[u8]).is_err()); assert!(Spec::load(&::std::env::temp_dir(), &[] as &[u8]).is_err());
} }
#[test] #[test]
fn test_chain() { fn test_chain() {
let test_spec = Spec::new_test(); let test_spec = Spec::new_test();
assert_eq!(test_spec.state_root(), H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap()); assert_eq!(
test_spec.state_root(),
H256::from_str(
"f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9",
).unwrap()
);
let genesis = test_spec.genesis_block(); let genesis = test_spec.genesis_block();
assert_eq!(BlockView::new(&genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap()); assert_eq!(
BlockView::new(&genesis).header_view().hash(),
H256::from_str(
"0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303",
).unwrap()
);
} }
#[test] #[test]
fn genesis_constructor() { fn genesis_constructor() {
::ethcore_logger::init_log(); ::ethcore_logger::init_log();
let spec = Spec::new_test_constructor(); let spec = Spec::new_test_constructor();
let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default())
let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(0), Default::default()).unwrap(); .unwrap();
let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let state = State::from_existing(
db.boxed_clone(),
spec.state_root(),
spec.engine.account_start_nonce(0),
Default::default(),
).unwrap();
let expected = H256::from_str(
"0000000000000000000000000000000000000000000000000000000000000001",
).unwrap();
let address = Address::from_str("0000000000000000000000000000000000000005").unwrap(); let address = Address::from_str("0000000000000000000000000000000000000005").unwrap();
assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected);

View File

@ -27,7 +27,7 @@ use std::sync::Arc;
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY}; use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY};
use receipt::{Receipt, TransactionOutcome}; use receipt::{Receipt, TransactionOutcome};
use engines::Engine; use machine::EthereumMachine as Machine;
use vm::EnvInfo; use vm::EnvInfo;
use error::Error; use error::Error;
use executive::{Executive, TransactOptions}; use executive::{Executive, TransactOptions};
@ -196,7 +196,7 @@ pub fn check_proof(
proof: &[::util::DBValue], proof: &[::util::DBValue],
root: H256, root: H256,
transaction: &SignedTransaction, transaction: &SignedTransaction,
engine: &Engine, machine: &Machine,
env_info: &EnvInfo, env_info: &EnvInfo,
) -> ProvedExecution { ) -> ProvedExecution {
let backend = self::backend::ProofCheck::new(proof); let backend = self::backend::ProofCheck::new(proof);
@ -206,7 +206,7 @@ pub fn check_proof(
let res = State::from_existing( let res = State::from_existing(
backend, backend,
root, root,
engine.account_start_nonce(env_info.number), machine.account_start_nonce(env_info.number),
factories factories
); );
@ -216,7 +216,7 @@ pub fn check_proof(
}; };
let options = TransactOptions::with_no_tracing().save_output_from_contract(); let options = TransactOptions::with_no_tracing().save_output_from_contract();
match state.execute(env_info, engine, transaction, options, true) { match state.execute(env_info, machine, transaction, options, true) {
Ok(executed) => ProvedExecution::Complete(executed), Ok(executed) => ProvedExecution::Complete(executed),
Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof,
Err(e) => ProvedExecution::Failed(e), Err(e) => ProvedExecution::Failed(e),
@ -230,7 +230,7 @@ pub fn prove_transaction<H: AsHashDB + Send + Sync>(
db: H, db: H,
root: H256, root: H256,
transaction: &SignedTransaction, transaction: &SignedTransaction,
engine: &Engine, machine: &Machine,
env_info: &EnvInfo, env_info: &EnvInfo,
factories: Factories, factories: Factories,
virt: bool, virt: bool,
@ -241,7 +241,7 @@ pub fn prove_transaction<H: AsHashDB + Send + Sync>(
let res = State::from_existing( let res = State::from_existing(
backend, backend,
root, root,
engine.account_start_nonce(env_info.number), machine.account_start_nonce(env_info.number),
factories, factories,
); );
@ -251,7 +251,7 @@ pub fn prove_transaction<H: AsHashDB + Send + Sync>(
}; };
let options = TransactOptions::with_no_tracing().dont_check_nonce().save_output_from_contract(); let options = TransactOptions::with_no_tracing().dont_check_nonce().save_output_from_contract();
match state.execute(env_info, engine, transaction, options, virt) { match state.execute(env_info, machine, transaction, options, virt) {
Err(ExecutionError::Internal(_)) => None, Err(ExecutionError::Internal(_)) => None,
Err(e) => { Err(e) => {
trace!(target: "state", "Proved call failed: {}", e); trace!(target: "state", "Proved call failed: {}", e);
@ -668,13 +668,13 @@ impl<B: Backend> State<B> {
/// Execute a given transaction, producing a receipt and an optional trace. /// Execute a given transaction, producing a receipt and an optional trace.
/// This will change the state accordingly. /// This will change the state accordingly.
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { pub fn apply(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
if tracing { if tracing {
let options = TransactOptions::with_tracing(); let options = TransactOptions::with_tracing();
self.apply_with_tracing(env_info, engine, t, options.tracer, options.vm_tracer) self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer)
} else { } else {
let options = TransactOptions::with_no_tracing(); let options = TransactOptions::with_no_tracing();
self.apply_with_tracing(env_info, engine, t, options.tracer, options.vm_tracer) self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer)
} }
} }
@ -683,7 +683,7 @@ impl<B: Backend> State<B> {
pub fn apply_with_tracing<V, T>( pub fn apply_with_tracing<V, T>(
&mut self, &mut self,
env_info: &EnvInfo, env_info: &EnvInfo,
engine: &Engine, machine: &Machine,
t: &SignedTransaction, t: &SignedTransaction,
tracer: T, tracer: T,
vm_tracer: V, vm_tracer: V,
@ -692,12 +692,13 @@ impl<B: Backend> State<B> {
V: trace::VMTracer, V: trace::VMTracer,
{ {
let options = TransactOptions::new(tracer, vm_tracer); let options = TransactOptions::new(tracer, vm_tracer);
let e = self.execute(env_info, engine, t, options, false)?; let e = self.execute(env_info, machine, t, options, false)?;
let params = machine.params();
let eip658 = env_info.number >= engine.params().eip658_transition; let eip658 = env_info.number >= params.eip658_transition;
let no_intermediate_commits = let no_intermediate_commits =
eip658 || eip658 ||
(env_info.number >= engine.params().eip98_transition && env_info.number >= engine.params().validate_receipts_transition); (env_info.number >= params.eip98_transition && env_info.number >= params.validate_receipts_transition);
let outcome = if no_intermediate_commits { let outcome = if no_intermediate_commits {
if eip658 { if eip658 {
@ -726,10 +727,10 @@ impl<B: Backend> State<B> {
// //
// `virt` signals that we are executing outside of a block set and restrictions like // `virt` signals that we are executing outside of a block set and restrictions like
// gas limits and gas costs should be lifted. // gas limits and gas costs should be lifted.
fn execute<T, V>(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, options: TransactOptions<T, V>, virt: bool) fn execute<T, V>(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, options: TransactOptions<T, V>, virt: bool)
-> Result<Executed, ExecutionError> where T: trace::Tracer, V: trace::VMTracer, -> Result<Executed, ExecutionError> where T: trace::Tracer, V: trace::VMTracer,
{ {
let mut e = Executive::new(self, env_info, engine); let mut e = Executive::new(self, env_info, machine);
match virt { match virt {
true => e.transact_virtual(t, options), true => e.transact_virtual(t, options),
@ -1071,6 +1072,7 @@ mod tests {
use bigint::hash::H256; use bigint::hash::H256;
use util::Address; use util::Address;
use tests::helpers::*; use tests::helpers::*;
use machine::EthereumMachine;
use vm::EnvInfo; use vm::EnvInfo;
use spec::*; use spec::*;
use transaction::*; use transaction::*;
@ -1082,6 +1084,12 @@ mod tests {
keccak("").into() keccak("").into()
} }
fn make_frontier_machine(max_depth: usize) -> EthereumMachine {
let mut machine = ::ethereum::new_frontier_test_machine();
machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth));
machine
}
#[test] #[test]
fn should_apply_create_transaction() { fn should_apply_create_transaction() {
init_log(); init_log();
@ -1090,7 +1098,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1102,7 +1110,7 @@ mod tests {
}.sign(&secret(), None); }.sign(&secret(), None);
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 0, subtraces: 0,
@ -1148,7 +1156,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1160,7 +1168,7 @@ mod tests {
}.sign(&secret(), None); }.sign(&secret(), None);
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
action: trace::Action::Create(trace::Create { action: trace::Action::Create(trace::Create {
@ -1184,7 +1192,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1197,7 +1205,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
@ -1226,7 +1234,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1238,7 +1246,7 @@ mod tests {
}.sign(&secret(), None); }.sign(&secret(), None);
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
@ -1267,7 +1275,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = &*Spec::new_test().engine; let machine = Spec::new_test_machine();
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1278,7 +1286,7 @@ mod tests {
data: vec![], data: vec![],
}.sign(&secret(), None); }.sign(&secret(), None);
let result = state.apply(&info, engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1308,7 +1316,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = &*Spec::new_test().engine; let machine = Spec::new_test_machine();
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1320,7 +1328,7 @@ mod tests {
}.sign(&secret(), None); }.sign(&secret(), None);
state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()).unwrap();
let result = state.apply(&info, engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1333,7 +1341,7 @@ mod tests {
call_type: CallType::Call, call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(28_061), gas_used: U256::from(3_721), // in post-eip150
output: vec![] output: vec![]
}), }),
subtraces: 0, subtraces: 0,
@ -1350,7 +1358,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = &*Spec::new_test().engine; let machine = Spec::new_test_machine();
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1363,7 +1371,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()).unwrap();
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap();
let result = state.apply(&info, engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1377,7 +1385,7 @@ mod tests {
call_type: CallType::Call, call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: 64.into(), gas_used: 724.into(), // in post-eip150
output: vec![] output: vec![]
}), }),
}, FlatTrace { }, FlatTrace {
@ -1409,9 +1417,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
info.number = 0x789b0; info.number = 0x789b0;
let engine = &*Spec::new_test().engine; let machine = Spec::new_test_machine();
println!("schedule.have_delegate_call: {:?}", engine.schedule(info.number).have_delegate_call);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1424,7 +1430,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap();
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap();
let result = state.apply(&info, engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1438,7 +1444,7 @@ mod tests {
call_type: CallType::Call, call_type: CallType::Call,
}), }),
result: trace::Res::Call(trace::CallResult { result: trace::Res::Call(trace::CallResult {
gas_used: U256::from(61), gas_used: U256::from(721), // in post-eip150
output: vec![] output: vec![]
}), }),
}, FlatTrace { }, FlatTrace {
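The `// in post-eip150` expectations above line up with EIP-150 raising the base cost of the CALL-family opcodes from 40 to 700 gas: 724 - 64 = 660 and 721 - 61 = 660, exactly the 700 - 40 = 660 increase, while the rest of each trace is unchanged. (A reading of the new expected values, not something stated in the diff itself.)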
@ -1469,7 +1475,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1482,7 +1488,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
action: trace::Action::Call(trace::Call { action: trace::Action::Call(trace::Call {
@ -1508,7 +1514,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1522,7 +1528,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap();
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1567,7 +1573,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1580,7 +1586,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 1, subtraces: 1,
@ -1621,7 +1627,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1634,7 +1640,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()).unwrap(); // not enough funds. state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()).unwrap(); // not enough funds.
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 0, subtraces: 0,
@ -1663,7 +1669,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1677,7 +1683,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()).unwrap();
state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 1, subtraces: 1,
@ -1718,7 +1724,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1733,7 +1739,7 @@ mod tests {
state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()).unwrap();
state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 1, subtraces: 1,
@ -1792,7 +1798,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1807,7 +1813,7 @@ mod tests {
state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()).unwrap(); state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()).unwrap();
state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap(); state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()).unwrap();
state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &(100.into()), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
@ -1864,7 +1870,7 @@ mod tests {
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5); let machine = make_frontier_machine(5);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1878,7 +1884,7 @@ mod tests {
state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()).unwrap(); state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()).unwrap();
state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty).unwrap(); state.add_balance(&0xa.into(), &50.into(), CleanupMode::NoEmpty).unwrap();
state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty).unwrap(); state.add_balance(&t.sender(), &100.into(), CleanupMode::NoEmpty).unwrap();
let result = state.apply(&info, &engine, &t, true).unwrap(); let result = state.apply(&info, &machine, &t, true).unwrap();
let expected_trace = vec![FlatTrace { let expected_trace = vec![FlatTrace {
trace_address: Default::default(), trace_address: Default::default(),
subtraces: 1, subtraces: 1,


@ -362,7 +362,7 @@ fn transaction_proof() {
let root = client.best_block_header().state_root(); let root = client.best_block_header().state_root();
let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap(); let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap();
Executive::new(&mut state, &client.latest_env_info(), &*test_spec.engine) Executive::new(&mut state, &client.latest_env_info(), test_spec.engine.machine())
.transact(&transaction, TransactOptions::with_no_tracing().dont_check_nonce()).unwrap(); .transact(&transaction, TransactOptions::with_no_tracing().dont_check_nonce()).unwrap();
assert_eq!(state.balance(&Address::default()).unwrap(), 5.into()); assert_eq!(state.balance(&Address::default()).unwrap(), 5.into());


@ -24,7 +24,7 @@ fn test_blockhash_eip210(factory: Factory) {
let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b"; let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b";
let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap()); let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap());
let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref()); let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref());
let engine = TestEngine::new_constantinople(); let machine = ::ethereum::new_constantinople_test_machine();
let mut env_info = EnvInfo::default(); let mut env_info = EnvInfo::default();
// populate state with 256 last hashes // populate state with 256 last hashes
@ -46,7 +46,7 @@ fn test_blockhash_eip210(factory: Factory) {
data: Some(H256::from(i - 1).to_vec()), data: Some(H256::from(i - 1).to_vec()),
call_type: CallType::Call, call_type: CallType::Call,
}; };
let mut ex = Executive::new(&mut state, &env_info, &engine); let mut ex = Executive::new(&mut state, &env_info, &machine);
let mut substate = Substate::new(); let mut substate = Substate::new();
let mut output = []; let mut output = [];
if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) { if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) {
@ -68,7 +68,7 @@ fn test_blockhash_eip210(factory: Factory) {
data: None, data: None,
call_type: CallType::Call, call_type: CallType::Call,
}; };
let mut ex = Executive::new(&mut state, &env_info, &engine); let mut ex = Executive::new(&mut state, &env_info, &machine);
let mut substate = Substate::new(); let mut substate = Substate::new();
let mut output = H256::new(); let mut output = H256::new();
if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) { if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) {


@ -14,82 +14,31 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeMap;
use std::sync::Arc;
use hash::keccak;
use ethkey::KeyPair;
use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use bigint::prelude::U256;
use bigint::hash::H256;
use util::*;
use bytes::Bytes;
use spec::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use state_db::StateDB; use bigint::hash::H256;
use bigint::prelude::U256;
use block::{OpenBlock, Drain}; use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig}; use blockchain::{BlockChain, Config as BlockChainConfig};
use builtin::Builtin; use bytes::Bytes;
use state::*; use client::{BlockChainClient, Client, ClientConfig};
use evm::{Schedule, Factory as EvmFactory};
use factory::Factories;
use engines::Engine;
use ethereum;
use ethereum::ethash::EthashParams; use ethereum::ethash::EthashParams;
use miner::Miner; use ethkey::KeyPair;
use evm::Factory as EvmFactory;
use factory::Factories;
use hash::keccak;
use header::Header; use header::Header;
use transaction::{Action, Transaction, SignedTransaction}; use io::*;
use machine::EthashExtensions;
use miner::Miner;
use rlp::{self, RlpStream}; use rlp::{self, RlpStream};
use spec::*;
use state_db::StateDB;
use state::*;
use std::sync::Arc;
use transaction::{Action, Transaction, SignedTransaction};
use util::*;
use views::BlockView; use views::BlockView;
pub struct TestEngine {
engine: Arc<Engine>,
max_depth: usize,
}
impl TestEngine {
pub fn new(max_depth: usize) -> TestEngine {
TestEngine {
engine: ethereum::new_frontier_test().engine,
max_depth: max_depth,
}
}
pub fn new_byzantium() -> TestEngine {
TestEngine {
engine: ethereum::new_byzantium_test().engine,
max_depth: 0,
}
}
pub fn new_constantinople() -> TestEngine {
TestEngine {
engine: ethereum::new_constantinople_test().engine,
max_depth: 0,
}
}
}
impl Engine for TestEngine {
fn name(&self) -> &str {
"TestEngine"
}
fn params(&self) -> &CommonParams {
self.engine.params()
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
self.engine.builtins()
}
fn schedule(&self, _block_number: u64) -> Schedule {
let mut schedule = self.engine.schedule(0);
schedule.max_depth = self.max_depth;
schedule
}
}
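The `TestEngine` wrapper removed above existed only to forward to a real engine while overriding `Schedule::max_depth`. Under the machine-based API the same effect is obtained directly, roughly as follows (a sketch; `make_frontier_machine` is the test-local helper introduced earlier in this diff, and only constructors that appear elsewhere in the diff are used):

let machine = make_frontier_machine(5);                      // was TestEngine::new(5)
let machine = ::ethereum::new_constantinople_test_machine(); // was TestEngine::new_constantinople()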
// TODO: move everything over to get_null_spec. // TODO: move everything over to get_null_spec.
pub fn get_test_spec() -> Spec { pub fn get_test_spec() -> Spec {
Spec::new_test() Spec::new_test()
@ -233,9 +182,9 @@ pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, ac
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) { pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
//let test_engine = test_spec.to_engine().unwrap();
let state_root = test_spec.genesis_header().state_root().clone(); let state_root = test_spec.genesis_header().state_root().clone();
let genesis_gas = test_spec.genesis_header().gas_limit().clone();
let mut rolling_hash = client.chain_info().best_block_hash; let mut rolling_hash = client.chain_info().best_block_hash;
let mut rolling_block_number = starting_number as u64; let mut rolling_block_number = starting_number as u64;
let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10; let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
@ -243,7 +192,7 @@ pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting
for _ in 0..block_number { for _ in 0..block_number {
let mut header = Header::new(); let mut header = Header::new();
header.set_gas_limit(test_engine.params().min_gas_limit); header.set_gas_limit(genesis_gas);
header.set_difficulty(U256::from(0x20000)); header.set_difficulty(U256::from(0x20000));
header.set_timestamp(rolling_timestamp); header.set_timestamp(rolling_timestamp);
header.set_number(rolling_block_number); header.set_number(rolling_block_number);
@ -272,9 +221,9 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
IoChannel::disconnected(), IoChannel::disconnected(),
).unwrap(); ).unwrap();
for block in &blocks { for block in blocks {
if client.import_block(block.clone()).is_err() { if let Err(e) = client.import_block(block) {
panic!("panic importing block which is well-formed"); panic!("error importing block which is well-formed: {:?}", e);
} }
} }
client.flush_queue(); client.flush_queue();
@ -344,13 +293,13 @@ pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> { pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let test_engine = &test_spec.engine; let genesis_gas = test_spec.genesis_header().gas_limit().clone();
let mut rolling_timestamp = start_number as u64 * 10; let mut rolling_timestamp = start_number as u64 * 10;
let mut parent = *parent_hash; let mut parent = *parent_hash;
let mut r = Vec::new(); let mut r = Vec::new();
for i in start_number .. start_number + count + 1 { for i in start_number .. start_number + count + 1 {
let mut block_header = Header::new(); let mut block_header = Header::new();
block_header.set_gas_limit(test_engine.params().min_gas_limit); block_header.set_gas_limit(genesis_gas);
block_header.set_difficulty(U256::from(i) * U256([0, 1, 0, 0])); block_header.set_difficulty(U256::from(i) * U256([0, 1, 0, 0]));
block_header.set_timestamp(rolling_timestamp); block_header.set_timestamp(rolling_timestamp);
block_header.set_number(i as u64); block_header.set_number(i as u64);
@ -368,8 +317,8 @@ pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_h
pub fn get_good_dummy_block_hash() -> (H256, Bytes) { pub fn get_good_dummy_block_hash() -> (H256, Bytes) {
let mut block_header = Header::new(); let mut block_header = Header::new();
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let test_engine = &test_spec.engine; let genesis_gas = test_spec.genesis_header().gas_limit().clone();
block_header.set_gas_limit(test_engine.params().min_gas_limit); block_header.set_gas_limit(genesis_gas);
block_header.set_difficulty(U256::from(0x20000)); block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40); block_header.set_timestamp(40);
block_header.set_number(1); block_header.set_number(1);
@ -387,8 +336,9 @@ pub fn get_good_dummy_block() -> Bytes {
pub fn get_bad_state_dummy_block() -> Bytes { pub fn get_bad_state_dummy_block() -> Bytes {
let mut block_header = Header::new(); let mut block_header = Header::new();
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let test_engine = &test_spec.engine; let genesis_gas = test_spec.genesis_header().gas_limit().clone();
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_gas_limit(genesis_gas);
block_header.set_difficulty(U256::from(0x20000)); block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40); block_header.set_timestamp(40);
block_header.set_number(1); block_header.set_number(1);
@ -398,33 +348,35 @@ pub fn get_bad_state_dummy_block() -> Bytes {
create_test_block(&block_header) create_test_block(&block_header)
} }
pub fn get_default_ethash_extensions() -> EthashExtensions {
EthashExtensions {
homestead_transition: 1150000,
eip150_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
dao_hardfork_transition: u64::max_value(),
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(),
dao_hardfork_accounts: Vec::new(),
}
}
pub fn get_default_ethash_params() -> EthashParams { pub fn get_default_ethash_params() -> EthashParams {
EthashParams { EthashParams {
minimum_difficulty: U256::from(131072), minimum_difficulty: U256::from(131072),
difficulty_bound_divisor: U256::from(2048), difficulty_bound_divisor: U256::from(2048),
difficulty_increment_divisor: 10, difficulty_increment_divisor: 10,
metropolis_difficulty_increment_divisor: 9, metropolis_difficulty_increment_divisor: 9,
duration_limit: 13,
homestead_transition: 1150000, homestead_transition: 1150000,
dao_hardfork_transition: u64::max_value(), duration_limit: 13,
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(), block_reward: 0.into(),
dao_hardfork_accounts: vec![],
difficulty_hardfork_transition: u64::max_value(), difficulty_hardfork_transition: u64::max_value(),
difficulty_hardfork_bound_divisor: U256::from(0), difficulty_hardfork_bound_divisor: U256::from(0),
bomb_defuse_transition: u64::max_value(), bomb_defuse_transition: u64::max_value(),
eip100b_transition: u64::max_value(), eip100b_transition: u64::max_value(),
eip150_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
ecip1010_pause_transition: u64::max_value(), ecip1010_pause_transition: u64::max_value(),
ecip1010_continue_transition: u64::max_value(), ecip1010_continue_transition: u64::max_value(),
ecip1017_era_rounds: u64::max_value(), ecip1017_era_rounds: u64::max_value(),
max_code_size: u64::max_value(),
max_gas_limit_transition: u64::max_value(),
max_gas_limit: U256::max_value(),
min_gas_price_transition: u64::max_value(),
min_gas_price: U256::zero(),
eip649_transition: u64::max_value(), eip649_transition: u64::max_value(),
eip649_delay: 3_000_000, eip649_delay: 3_000_000,
eip649_reward: None, eip649_reward: None,


@ -65,6 +65,8 @@ fn can_trace_block_and_uncle_reward() {
// block with transaction and uncle // block with transaction and uncle
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let genesis_gas = genesis_header.gas_limit().clone();
let mut db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let mut db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let mut rolling_timestamp = 40; let mut rolling_timestamp = 40;
let mut last_hashes = vec![]; let mut last_hashes = vec![];
@ -165,7 +167,7 @@ fn can_trace_block_and_uncle_reward() {
let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
uncle.set_author(uncle_author); uncle.set_author(uncle_author);
uncle.set_parent_hash(root_header.hash()); uncle.set_parent_hash(root_header.hash());
uncle.set_gas_limit(U256::from(50_000)); uncle.set_gas_limit(genesis_gas);
uncle.set_number(root_header.number() + 1); uncle.set_number(root_header.number() + 1);
uncle.set_timestamp(rolling_timestamp); uncle.set_timestamp(rolling_timestamp);
block.push_uncle(uncle).unwrap(); block.push_uncle(uncle).unwrap();
@ -186,6 +188,8 @@ fn can_trace_block_and_uncle_reward() {
range: (BlockId::Number(1)..BlockId::Number(3)), range: (BlockId::Number(1)..BlockId::Number(3)),
from_address: vec![], from_address: vec![],
to_address: vec![], to_address: vec![],
after: None,
count: None,
}; };
let traces = client.filter_traces(filter); let traces = client.filter_traces(filter);


@ -16,15 +16,14 @@
//! Smart contract based transaction filter. //! Smart contract based transaction filter.
use std::sync::Weak;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
use bigint::hash::H256; use bigint::hash::H256;
use native_contracts::TransactAcl as Contract; use native_contracts::TransactAcl as Contract;
use client::{EngineClient, BlockId, ChainNotify}; use client::{BlockChainClient, BlockId, ChainNotify};
use util::Address; use util::Address;
use bytes::Bytes; use bytes::Bytes;
use parking_lot::{Mutex, RwLock}; use parking_lot::Mutex;
use futures::{self, Future}; use futures::{self, Future};
use spec::CommonParams; use spec::CommonParams;
use transaction::{Action, SignedTransaction}; use transaction::{Action, SignedTransaction};
@ -44,7 +43,6 @@ mod tx_permissions {
/// Connection filter that uses a contract to manage permissions. /// Connection filter that uses a contract to manage permissions.
pub struct TransactionFilter { pub struct TransactionFilter {
contract: Mutex<Option<Contract>>, contract: Mutex<Option<Contract>>,
client: RwLock<Option<Weak<EngineClient>>>,
contract_address: Address, contract_address: Address,
permission_cache: Mutex<HashMap<(H256, Address), u32>>, permission_cache: Mutex<HashMap<(H256, Address), u32>>,
} }
@ -55,7 +53,6 @@ impl TransactionFilter {
params.transaction_permission_contract.map(|address| params.transaction_permission_contract.map(|address|
TransactionFilter { TransactionFilter {
contract: Mutex::new(None), contract: Mutex::new(None),
client: RwLock::new(None),
contract_address: address, contract_address: address,
permission_cache: Mutex::new(HashMap::new()), permission_cache: Mutex::new(HashMap::new()),
} }
@ -67,24 +64,9 @@ impl TransactionFilter {
self.permission_cache.lock().clear(); self.permission_cache.lock().clear();
} }
/// Set client reference to be used for contract call.
pub fn register_client(&self, client: Weak<EngineClient>) {
*self.client.write() = Some(client);
}
/// Check if transaction is allowed at given block. /// Check if transaction is allowed at given block.
pub fn transaction_allowed(&self, parent_hash: &H256, transaction: &SignedTransaction) -> bool { pub fn transaction_allowed(&self, parent_hash: &H256, transaction: &SignedTransaction, client: &BlockChainClient) -> bool {
self.client.read().as_ref().map_or(false, |client| {
let mut cache = self.permission_cache.lock(); let len = cache.len(); let mut cache = self.permission_cache.lock(); let len = cache.len();
let client = match client.upgrade() {
Some(client) => client,
_ => return false,
};
let client = match client.as_full_client() {
Some(client) => client,
_ => return false, // TODO: how to handle verification for light clients?
};
let tx_type = match transaction.action { let tx_type = match transaction.action {
Action::Create => tx_permissions::CREATE, Action::Create => tx_permissions::CREATE,
@ -123,7 +105,6 @@ impl TransactionFilter {
permissions & tx_type != 0 permissions & tx_type != 0
} }
} }
})
} }
} }
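With the registered weak client gone, callers now hand the client to every check explicitly, as the updated tests below do. A sketch of the new call shape, where `parent_hash`, `signed_tx` and `client` are assumed bindings from the caller's context:

let allowed = filter.transaction_allowed(&parent_hash, &signed_tx, &*client);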
@ -137,7 +118,7 @@ impl ChainNotify for TransactionFilter {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use std::sync::{Arc, Weak}; use std::sync::Arc;
use spec::Spec; use spec::Spec;
use client::{BlockChainClient, Client, ClientConfig, BlockId}; use client::{BlockChainClient, Client, ClientConfig, BlockId};
use miner::Miner; use miner::Miner;
@ -196,7 +177,7 @@ mod test {
} }
"#; "#;
let spec = Spec::load(::std::env::temp_dir(), spec_data.as_bytes()).unwrap(); let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
let client_db = Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0))); let client_db = Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let client = Client::new( let client = Client::new(
@ -212,7 +193,6 @@ mod test {
let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap(); let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap();
let filter = TransactionFilter::from_params(spec.params()).unwrap(); let filter = TransactionFilter::from_params(spec.params()).unwrap();
filter.register_client(Arc::downgrade(&client) as Weak<_>);
let mut basic_tx = Transaction::default(); let mut basic_tx = Transaction::default();
basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032")); basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032"));
let create_tx = Transaction::default(); let create_tx = Transaction::default();
@ -221,21 +201,21 @@ mod test {
let genesis = client.block_hash(BlockId::Latest).unwrap(); let genesis = client.block_hash(BlockId::Latest).unwrap();
assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None))); assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None), &*client));
assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None))); assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None), &*client));
assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None))); assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None), &*client));
assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key2.secret(), None))); assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key2.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key2.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key2.secret(), None), &*client));
assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key2.secret(), None))); assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key2.secret(), None), &*client));
assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key3.secret(), None))); assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key3.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key3.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key3.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key3.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key3.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key4.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key4.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key4.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key4.secret(), None), &*client));
assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key4.secret(), None))); assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key4.secret(), None), &*client));
} }
} }


@ -16,8 +16,7 @@
//! Canonical verifier. //! Canonical verifier.
use blockchain::BlockProvider; use engines::EthEngine;
use engines::Engine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use super::Verifier; use super::Verifier;
@ -27,15 +26,21 @@ use super::verification;
pub struct CanonVerifier; pub struct CanonVerifier;
impl Verifier for CanonVerifier { impl Verifier for CanonVerifier {
fn verify_block_family(&self, header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error> { fn verify_block_family(
verification::verify_block_family(header, bytes, engine, bc) &self,
header: &Header,
parent: &Header,
engine: &EthEngine,
do_full: Option<verification::FullFamilyParams>,
) -> Result<(), Error> {
verification::verify_block_family(header, parent, engine, do_full)
} }
fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> { fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> {
verification::verify_block_final(expected, got) verification::verify_block_final(expected, got)
} }
fn verify_block_external(&self, header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error> {
engine.verify_block_external(header, Some(bytes)) engine.verify_block_external(header)
} }
} }


@ -16,18 +16,23 @@
//! No-op verifier. //! No-op verifier.
use blockchain::BlockProvider; use engines::EthEngine;
use engines::Engine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use super::Verifier; use super::{verification, Verifier};
/// A no-op verifier -- this will verify everything it's given immediately. /// A no-op verifier -- this will verify everything it's given immediately.
#[allow(dead_code)] #[allow(dead_code)]
pub struct NoopVerifier; pub struct NoopVerifier;
impl Verifier for NoopVerifier { impl Verifier for NoopVerifier {
fn verify_block_family(&self, _header: &Header, _bytes: &[u8], _engine: &Engine, _bc: &BlockProvider) -> Result<(), Error> { fn verify_block_family(
&self,
_: &Header,
_t: &Header,
_: &EthEngine,
_: Option<verification::FullFamilyParams>
) -> Result<(), Error> {
Ok(()) Ok(())
} }
@ -35,7 +40,7 @@ impl Verifier for NoopVerifier {
Ok(()) Ok(())
} }
fn verify_block_external(&self, _header: &Header, _bytes: &[u8], _engine: &Engine) -> Result<(), Error> { fn verify_block_external(&self, _header: &Header, _engine: &EthEngine) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }


@ -16,7 +16,7 @@
//! Definition of valid items for the verification queue. //! Definition of valid items for the verification queue.
use engines::Engine; use engines::EthEngine;
use error::Error; use error::Error;
use heapsize::HeapSizeOf; use heapsize::HeapSizeOf;
@ -59,17 +59,17 @@ pub trait Kind: 'static + Sized + Send + Sync {
type Verified: Sized + Send + BlockLike + HeapSizeOf; type Verified: Sized + Send + BlockLike + HeapSizeOf;
/// Attempt to create the `Unverified` item from the input. /// Attempt to create the `Unverified` item from the input.
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error>; fn create(input: Self::Input, engine: &EthEngine) -> Result<Self::Unverified, Error>;
/// Attempt to verify the `Unverified` item using the given engine. /// Attempt to verify the `Unverified` item using the given engine.
fn verify(unverified: Self::Unverified, engine: &Engine, check_seal: bool) -> Result<Self::Verified, Error>; fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error>;
} }
/// The blocks verification module. /// The blocks verification module.
pub mod blocks { pub mod blocks {
use super::{Kind, BlockLike}; use super::{Kind, BlockLike};
use engines::Engine; use engines::EthEngine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}; use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered};
@ -87,7 +87,7 @@ pub mod blocks {
type Unverified = Unverified; type Unverified = Unverified;
type Verified = PreverifiedBlock; type Verified = PreverifiedBlock;
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error> { fn create(input: Self::Input, engine: &EthEngine) -> Result<Self::Unverified, Error> {
match verify_block_basic(&input.header, &input.bytes, engine) { match verify_block_basic(&input.header, &input.bytes, engine) {
Ok(()) => Ok(input), Ok(()) => Ok(input),
Err(e) => { Err(e) => {
@ -97,7 +97,7 @@ pub mod blocks {
} }
} }
fn verify(un: Self::Unverified, engine: &Engine, check_seal: bool) -> Result<Self::Verified, Error> { fn verify(un: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
let hash = un.hash(); let hash = un.hash();
match verify_block_unordered(un.header, un.bytes, engine, check_seal) { match verify_block_unordered(un.header, un.bytes, engine, check_seal) {
Ok(verified) => Ok(verified), Ok(verified) => Ok(verified),
@ -167,7 +167,7 @@ pub mod blocks {
pub mod headers { pub mod headers {
use super::{Kind, BlockLike}; use super::{Kind, BlockLike};
use engines::Engine; use engines::EthEngine;
use error::Error; use error::Error;
use header::Header; use header::Header;
use verification::verify_header_params; use verification::verify_header_params;
@ -189,13 +189,13 @@ pub mod headers {
type Unverified = Header; type Unverified = Header;
type Verified = Header; type Verified = Header;
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error> { fn create(input: Self::Input, engine: &EthEngine) -> Result<Self::Unverified, Error> {
verify_header_params(&input, engine, true).map(|_| input) verify_header_params(&input, engine, true).map(|_| input)
} }
fn verify(unverified: Self::Unverified, engine: &Engine, check_seal: bool) -> Result<Self::Verified, Error> { fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
match check_seal { match check_seal {
true => engine.verify_block_unordered(&unverified, None).map(|_| unverified), true => engine.verify_block_unordered(&unverified,).map(|_| unverified),
false => Ok(unverified), false => Ok(unverified),
} }
} }


@ -28,7 +28,7 @@ use bigint::hash::H256;
use parking_lot::{Condvar, Mutex, RwLock}; use parking_lot::{Condvar, Mutex, RwLock};
use io::*; use io::*;
use error::*; use error::*;
use engines::Engine; use engines::EthEngine;
use service::*; use service::*;
use self::kind::{BlockLike, Kind}; use self::kind::{BlockLike, Kind};
@ -141,7 +141,7 @@ struct Sizes {
/// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`. /// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`.
/// Keeps them in the same order as inserted, minus invalid items. /// Keeps them in the same order as inserted, minus invalid items.
pub struct VerificationQueue<K: Kind> { pub struct VerificationQueue<K: Kind> {
engine: Arc<Engine>, engine: Arc<EthEngine>,
more_to_verify: Arc<SCondvar>, more_to_verify: Arc<SCondvar>,
verification: Arc<Verification<K>>, verification: Arc<Verification<K>>,
deleting: Arc<AtomicBool>, deleting: Arc<AtomicBool>,
@ -213,7 +213,7 @@ struct Verification<K: Kind> {
impl<K: Kind> VerificationQueue<K> { impl<K: Kind> VerificationQueue<K> {
/// Creates a new queue instance. /// Creates a new queue instance.
pub fn new(config: Config, engine: Arc<Engine>, message_channel: IoChannel<ClientIoMessage>, check_seal: bool) -> Self { pub fn new(config: Config, engine: Arc<EthEngine>, message_channel: IoChannel<ClientIoMessage>, check_seal: bool) -> Self {
let verification = Arc::new(Verification { let verification = Arc::new(Verification {
unverified: Mutex::new(VecDeque::new()), unverified: Mutex::new(VecDeque::new()),
verifying: Mutex::new(VecDeque::new()), verifying: Mutex::new(VecDeque::new()),
@ -294,7 +294,7 @@ impl<K: Kind> VerificationQueue<K> {
fn verify( fn verify(
verification: Arc<Verification<K>>, verification: Arc<Verification<K>>,
engine: Arc<Engine>, engine: Arc<EthEngine>,
wait: Arc<SCondvar>, wait: Arc<SCondvar>,
ready: Arc<QueueSignal>, ready: Arc<QueueSignal>,
empty: Arc<SCondvar>, empty: Arc<SCondvar>,


@ -22,20 +22,24 @@
//! 3. Final verification against the blockchain done before enactment. //! 3. Final verification against the blockchain done before enactment.
use std::collections::HashSet; use std::collections::HashSet;
use hash::keccak;
use triehash::ordered_trie_root;
use heapsize::HeapSizeOf;
use bigint::hash::H256;
use unexpected::{Mismatch, OutOfBounds};
use bytes::Bytes;
use engines::Engine;
use error::{BlockError, Error};
use blockchain::*; use blockchain::*;
use client::BlockChainClient;
use engines::EthEngine;
use error::{BlockError, Error};
use header::{BlockNumber, Header}; use header::{BlockNumber, Header};
use rlp::UntrustedRlp;
use transaction::SignedTransaction; use transaction::SignedTransaction;
use views::BlockView; use views::BlockView;
use bigint::hash::H256;
use bigint::prelude::U256;
use bytes::Bytes;
use hash::keccak;
use heapsize::HeapSizeOf;
use rlp::UntrustedRlp;
use time::get_time; use time::get_time;
use triehash::ordered_trie_root;
use unexpected::{Mismatch, OutOfBounds};
/// Preprocessed block data gathered in `verify_block_unordered` call /// Preprocessed block data gathered in `verify_block_unordered` call
pub struct PreverifiedBlock { pub struct PreverifiedBlock {
@ -56,14 +60,14 @@ impl HeapSizeOf for PreverifiedBlock {
} }
/// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &EthEngine) -> Result<(), Error> {
verify_header_params(&header, engine, true)?; verify_header_params(&header, engine, true)?;
verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash())?; verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash())?;
engine.verify_block_basic(&header, Some(bytes))?; engine.verify_block_basic(&header)?;
for u in UntrustedRlp::new(bytes).at(2)?.iter().map(|rlp| rlp.as_val::<Header>()) { for u in UntrustedRlp::new(bytes).at(2)?.iter().map(|rlp| rlp.as_val::<Header>()) {
let u = u?; let u = u?;
verify_header_params(&u, engine, false)?; verify_header_params(&u, engine, false)?;
engine.verify_block_basic(&u, None)?; engine.verify_block_basic(&u)?;
} }
// Verify transactions. // Verify transactions.
// TODO: either use transaction views or cache the decoded transactions. // TODO: either use transaction views or cache the decoded transactions.
@ -77,11 +81,11 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block /// Still operates on a individual block
/// Returns a `PreverifiedBlock` structure populated with transactions /// Returns a `PreverifiedBlock` structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine, check_seal: bool) -> Result<PreverifiedBlock, Error> { pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &EthEngine, check_seal: bool) -> Result<PreverifiedBlock, Error> {
if check_seal { if check_seal {
engine.verify_block_unordered(&header, Some(&bytes))?; engine.verify_block_unordered(&header)?;
for u in UntrustedRlp::new(&bytes).at(2)?.iter().map(|rlp| rlp.as_val::<Header>()) { for u in UntrustedRlp::new(&bytes).at(2)?.iter().map(|rlp| rlp.as_val::<Header>()) {
engine.verify_block_unordered(&u?, None)?; engine.verify_block_unordered(&u?)?;
} }
} }
// Verify transactions. // Verify transactions.
@ -92,7 +96,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine, che
{ {
let v = BlockView::new(&bytes); let v = BlockView::new(&bytes);
for t in v.transactions() { for t in v.transactions() {
let t = engine.verify_transaction(t, &header)?; let t = engine.verify_transaction_unordered(t, &header)?;
if let Some(max_nonce) = nonce_cap { if let Some(max_nonce) = nonce_cap {
if t.nonce >= max_nonce { if t.nonce >= max_nonce {
return Err(BlockError::TooManyTransactions(t.sender()).into()); return Err(BlockError::TooManyTransactions(t.sender()).into());
@ -108,13 +112,30 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine, che
}) })
} }
/// Phase 3 verification. Check block information against parent and uncles. /// Parameters for full verification of block family: block bytes, transactions, blockchain, and state access.
pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error> { pub type FullFamilyParams<'a> = (&'a [u8], &'a [SignedTransaction], &'a BlockProvider, &'a BlockChainClient);
// TODO: verify timestamp
let parent = bc.block_header(&header.parent_hash()).ok_or_else(|| Error::from(BlockError::UnknownParent(header.parent_hash().clone())))?;
verify_parent(&header, &parent)?;
engine.verify_block_family(&header, &parent, Some(bytes))?;
/// Phase 3 verification. Check block information against parent and uncles.
pub fn verify_block_family(header: &Header, parent: &Header, engine: &EthEngine, do_full: Option<FullFamilyParams>) -> Result<(), Error> {
// TODO: verify timestamp
verify_parent(&header, &parent, engine.params().gas_limit_bound_divisor)?;
engine.verify_block_family(&header, &parent)?;
let (bytes, txs, bc, client) = match do_full {
Some(x) => x,
None => return Ok(()),
};
verify_uncles(header, bytes, bc, engine)?;
for transaction in txs {
engine.machine().verify_transaction(transaction, header, client)?;
}
Ok(())
}
fn verify_uncles(header: &Header, bytes: &[u8], bc: &BlockProvider, engine: &EthEngine) -> Result<(), Error> {
let num_uncles = UntrustedRlp::new(bytes).at(2)?.item_count()?; let num_uncles = UntrustedRlp::new(bytes).at(2)?.item_count()?;
if num_uncles != 0 { if num_uncles != 0 {
if num_uncles > engine.maximum_uncle_count() { if num_uncles > engine.maximum_uncle_count() {
@ -189,11 +210,12 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &
return Err(From::from(BlockError::UncleParentNotInChain(uncle_parent.hash()))); return Err(From::from(BlockError::UncleParentNotInChain(uncle_parent.hash())));
} }
verify_parent(&uncle, &uncle_parent)?; verify_parent(&uncle, &uncle_parent, engine.params().gas_limit_bound_divisor)?;
engine.verify_block_family(&uncle, &uncle_parent, Some(bytes))?; engine.verify_block_family(&uncle, &uncle_parent)?;
verified.insert(uncle.hash()); verified.insert(uncle.hash());
} }
} }
Ok(()) Ok(())
} }
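Taken together, the three verification phases now chain as sketched below. This is an illustrative composition under the signatures shown in this file, assuming `PreverifiedBlock` still exposes its `header`, `transactions` and `bytes` fields and that suitable `engine`, `bc` and `client` references are in scope; it is not a function added by this commit:

fn verify_all(header: Header, bytes: Bytes, parent: &Header, engine: &EthEngine,
              bc: &BlockProvider, client: &BlockChainClient) -> Result<PreverifiedBlock, Error> {
    verify_block_basic(&header, &bytes, engine)?;                    // phase 1: cheap structural checks
    let pre = verify_block_unordered(header, bytes, engine, true)?;  // phase 2: seal + signature recovery
    let params: FullFamilyParams = (&pre.bytes[..], &pre.transactions[..], bc, client);
    verify_block_family(&pre.header, parent, engine, Some(params))?; // phase 3: parent, uncle and tx checks
    Ok(pre)
}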
@ -215,7 +237,13 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error>
} }
/// Check basic header parameters. /// Check basic header parameters.
pub fn verify_header_params(header: &Header, engine: &Engine, is_full: bool) -> Result<(), Error> { pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool) -> Result<(), Error> {
if header.seal().len() != engine.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: engine.seal_fields(), found: header.seal().len() }
)));
}
if header.number() >= From::from(BlockNumber::max_value()) { if header.number() >= From::from(BlockNumber::max_value()) {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() }))) return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() })))
} }
@ -230,6 +258,15 @@ pub fn verify_header_params(header: &Header, engine: &Engine, is_full: bool) ->
if header.number() != 0 && header.extra_data().len() > maximum_extra_data_size { if header.number() != 0 && header.extra_data().len() > maximum_extra_data_size {
return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: Some(maximum_extra_data_size), found: header.extra_data().len() }))); return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: Some(maximum_extra_data_size), found: header.extra_data().len() })));
} }
if let Some(ref ext) = engine.machine().ethash_extensions() {
if header.number() >= ext.dao_hardfork_transition &&
header.number() <= ext.dao_hardfork_transition + 9 &&
header.extra_data()[..] != b"dao-hard-fork"[..] {
return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: None, found: 0 })));
}
}
if is_full { if is_full {
let max_time = get_time().sec as u64 + 30; let max_time = get_time().sec as u64 + 30;
if header.timestamp() > max_time { if header.timestamp() > max_time {
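The new DAO-fork clause pins `extra_data` to the literal b"dao-hard-fork" for the ten blocks starting at the transition. A small sketch of that window, assuming mainnet's transition block of 1,920,000 (a chain-spec value, not one stated in this diff):

let dao_transition = 1_920_000u64;
let in_dao_window = |n: u64| n >= dao_transition && n <= dao_transition + 9;
assert!(in_dao_window(1_920_000) && in_dao_window(1_920_009));
assert!(!in_dao_window(1_920_010));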
@ -240,7 +277,7 @@ pub fn verify_header_params(header: &Header, engine: &Engine, is_full: bool) ->
} }
/// Check header parameters against parent header. /// Check header parameters against parent header.
fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> { fn verify_parent(header: &Header, parent: &Header, gas_limit_divisor: U256) -> Result<(), Error> {
if !header.parent_hash().is_zero() && &parent.hash() != header.parent_hash() { if !header.parent_hash().is_zero() && &parent.hash() != header.parent_hash() {
return Err(From::from(BlockError::InvalidParentHash(Mismatch { expected: parent.hash(), found: header.parent_hash().clone() }))) return Err(From::from(BlockError::InvalidParentHash(Mismatch { expected: parent.hash(), found: header.parent_hash().clone() })))
} }
@@ -250,6 +287,18 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> {
	if header.number() != parent.number() + 1 {
		return Err(From::from(BlockError::InvalidNumber(Mismatch { expected: parent.number() + 1, found: header.number() })));
	}
+	if header.number() == 0 {
+		return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into());
+	}
+
+	let parent_gas_limit = *parent.gas_limit();
+	let min_gas = parent_gas_limit - parent_gas_limit / gas_limit_divisor;
+	let max_gas = parent_gas_limit + parent_gas_limit / gas_limit_divisor;
+	if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
+		return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
+	}
+
	Ok(())
}
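
The gas-limit bound introduced here follows the usual Ethereum rule: a child block may move the gas limit strictly less than parent_gas_limit / divisor away from the parent's value, with the divisor taken from engine.params().gas_limit_bound_divisor (1024 on the Foundation chain). A standalone sketch of the arithmetic with plain u64 in place of U256:

// With divisor d, the child gas limit must stay strictly between
// parent - parent/d and parent + parent/d.
fn gas_limit_in_bounds(parent_gas_limit: u64, gas_limit: u64, divisor: u64) -> bool {
    let delta = parent_gas_limit / divisor; // maximum allowed movement per block
    let min_gas = parent_gas_limit - delta; // exclusive lower bound
    let max_gas = parent_gas_limit + delta; // exclusive upper bound
    gas_limit > min_gas && gas_limit < max_gas
}

fn main() {
    // With a divisor of 1024, a parent limit of 8_000_000 allows the child
    // limit to move by strictly less than 7_812 in either direction.
    assert!(gas_limit_in_bounds(8_000_000, 8_006_000, 1024));
    assert!(!gas_limit_in_bounds(8_000_000, 8_008_000, 1024));
}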
@@ -285,7 +334,7 @@ mod tests {
	use error::BlockError::*;
	use views::*;
	use blockchain::*;
-	use engines::Engine;
+	use engines::EthEngine;
	use spec::*;
	use transaction::*;
	use tests::helpers::*;
@@ -406,17 +455,38 @@ mod tests {
		}
	}

-	fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> {
+	fn basic_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> {
		let header = BlockView::new(bytes).header();
		verify_block_basic(&header, bytes, engine)
	}

-	fn family_test<BC>(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider {
-		let header = BlockView::new(bytes).header();
-		verify_block_family(&header, bytes, engine, bc)
+	fn family_test<BC>(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider {
+		let view = BlockView::new(bytes);
+		let header = view.header();
+		let transactions: Vec<_> = view.transactions()
+			.into_iter()
+			.map(SignedTransaction::new)
+			.collect::<Result<_,_>>()?;
+
+		// TODO: client is really meant to be used for state query here by machine
+		// additions that need access to state (tx filter in specific)
+		// no existing tests need access to test, so having this not function
+		// is fine.
+		let client = ::client::TestBlockChainClient::default();
+		let parent = bc.block_header(header.parent_hash())
+			.ok_or(BlockError::UnknownParent(header.parent_hash().clone()))?;
+
+		let full_params: FullFamilyParams = (
+			bytes,
+			&transactions[..],
+			bc as &BlockProvider,
+			&client as &::client::BlockChainClient
+		);
+
+		verify_block_family(&header, &parent, engine, Some(full_params))
	}

-	fn unordered_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> {
+	fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> {
		let header = BlockView::new(bytes).header();
		verify_block_unordered(header, bytes.to_vec(), engine, false)?;
		Ok(())
@@ -590,6 +660,15 @@ mod tests {
		check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine, &bc),
			DuplicateUncle(good_uncle1.hash()));

+		header = good.clone();
+		header.set_gas_limit(0.into());
+		header.set_difficulty("0000000000000000000000000000000000000000000000000000000000020000".parse::<U256>().unwrap());
+		match family_test(&create_test_block(&header), engine, &bc) {
+			Err(Error::Block(InvalidGasLimit(_))) => {},
+			Err(_) => { panic!("should be invalid difficulty fail"); },
+			_ => { panic!("Should be error, got Ok"); },
+		}
+
		// TODO: some additional uncle checks
	}
@@ -597,6 +676,7 @@ mod tests {
	fn dust_protection() {
		use ethkey::{Generator, Random};
		use transaction::{Transaction, Action};
+		use machine::EthereumMachine;
		use engines::NullEngine;

		let mut params = CommonParams::default();
@@ -618,7 +698,8 @@ mod tests {
		let good_transactions = [bad_transactions[0].clone(), bad_transactions[1].clone()];

-		let engine = NullEngine::new(params, BTreeMap::new());
+		let machine = EthereumMachine::regular(params, BTreeMap::new());
+		let engine = NullEngine::new(Default::default(), machine);

		check_fail(unordered_test(&create_test_block_with_data(&header, &bad_transactions, &[]), &engine), TooManyTransactions(keypair.address()));
		unordered_test(&create_test_block_with_data(&header, &good_transactions, &[]), &engine).unwrap();
	}

View File

@@ -16,17 +16,24 @@
//! A generic verifier trait.

-use blockchain::BlockProvider;
-use engines::Engine;
+use engines::EthEngine;
use error::Error;
use header::Header;
+use super::verification;

/// Should be used to verify blocks.
pub trait Verifier: Send + Sync {
	/// Verify a block relative to its parent and uncles.
-	fn verify_block_family(&self, header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error>;
+	fn verify_block_family(
+		&self,
+		header: &Header,
+		parent: &Header,
+		engine: &EthEngine,
+		do_full: Option<verification::FullFamilyParams>
+	) -> Result<(), Error>;

	/// Do a final verification check for an enacted header vs its expected counterpart.
	fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>;

	/// Verify a block, inspecing external state.
-	fn verify_block_external(&self, header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error>;
+	fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>;
}
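
The trait now takes the decoded parent header plus an optional bundle of "full family" parameters instead of raw bytes and a chain provider. A toy, self-contained sketch of that shape with invented stand-in types (not Parity's):

// The heavier checks run only when full parameters are supplied.
struct Header { number: u64 }
struct FullParams<'a> { raw_block: &'a [u8] }

trait Verifier {
    fn verify_block_family<'a>(
        &self,
        header: &Header,
        parent: &Header,
        full: Option<FullParams<'a>>
    ) -> Result<(), String>;
}

struct BasicVerifier;

impl Verifier for BasicVerifier {
    fn verify_block_family<'a>(
        &self,
        header: &Header,
        parent: &Header,
        full: Option<FullParams<'a>>
    ) -> Result<(), String> {
        if header.number != parent.number + 1 {
            return Err(format!("expected number {}, got {}", parent.number + 1, header.number));
        }
        // Expensive body checks (transactions, uncles, state lookups) are opt-in.
        if let Some(params) = full {
            if params.raw_block.is_empty() {
                return Err("empty block body".into());
            }
        }
        Ok(())
    }
}

fn main() {
    let parent = Header { number: 9 };
    let child = Header { number: 10 };
    assert!(BasicVerifier.verify_block_family(&child, &parent, None).is_ok());
    assert!(BasicVerifier.verify_block_family(&child, &parent, Some(FullParams { raw_block: &[] })).is_err());
}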

View File

@@ -28,4 +28,8 @@ pub struct Filter {
	pub from_address: Vec<Address>,
	/// To address.
	pub to_address: Vec<Address>,
+	/// Output offset
+	pub after: Option<usize>,
+	/// Output amount
+	pub count: Option<usize>,
}
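
The new after/count fields read as plain offset/limit pagination over the filter's output. A small sketch of how such pagination composes, with a Vec<u32> standing in for the real results (assumed semantics: after skips entries, count caps the remainder):

// Offset/limit pagination as suggested by the `after`/`count` fields.
fn paginate(results: Vec<u32>, after: Option<usize>, count: Option<usize>) -> Vec<u32> {
    results
        .into_iter()
        .skip(after.unwrap_or(0))                  // drop the first `after` entries
        .take(count.unwrap_or(usize::max_value())) // then keep at most `count`
        .collect()
}

fn main() {
    let all = vec![10, 11, 12, 13, 14];
    assert_eq!(paginate(all.clone(), Some(2), Some(2)), vec![12, 13]);
    assert_eq!(paginate(all, None, None), vec![10, 11, 12, 13, 14]);
}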

View File

@@ -230,7 +230,7 @@ impl Args {
		Ok(match self.flag_chain {
			Some(ref filename) => {
				let file = fs::File::open(filename).map_err(|e| format!("{}", e))?;
-				spec::Spec::load(::std::env::temp_dir(), file)?
+				spec::Spec::load(&::std::env::temp_dir(), file)?
			},
			None => {
				ethcore::ethereum::new_foundation(&::std::env::temp_dir())

View File

@@ -52,7 +52,7 @@ pub trait IpcConfig {
/// Error in dispatching or invoking methods via IPC
#[derive(Debug)]
pub enum Error {
-	UnkownSystemCall,
+	UnknownSystemCall,
	ClientUnsupported,
	RemoteServiceUnsupported,
	HandshakeFailed,

2
js/package-lock.json generated
View File

@@ -1,6 +1,6 @@
{
  "name": "Parity",
-  "version": "1.8.21",
+  "version": "1.8.25",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {

View File

@@ -1,6 +1,6 @@
{
  "name": "Parity",
-  "version": "1.8.21",
+  "version": "1.8.25",
  "main": "src/index.parity.js",
  "jsnext:main": "src/index.parity.js",
  "author": "Parity Team <admin@parity.io>",

View File

@@ -80,16 +80,23 @@ class FirstRun extends Component {
    hasAccounts: PropTypes.bool.isRequired,
    newError: PropTypes.func.isRequired,
    onClose: PropTypes.func.isRequired,
-    visible: PropTypes.bool.isRequired
+    visible: PropTypes.bool.isRequired,
+    isTest: PropTypes.bool.isRequired
  }

-  createStore = new CreateStore(this.context.api, {}, true, false);
+  createStore = new CreateStore(this.context.api, {}, this.props.isTest, false);

  state = {
    stage: 0,
    hasAcceptedTnc: false
  }

+  componentWillReceiveProps (nextProps) {
+    if (nextProps.isTest !== this.props.isTest) {
+      this.createStore.setIsTest(nextProps.isTest);
+    }
+  }
+
  render () {
    const { visible } = this.props;
    const { stage } = this.state;
@@ -350,9 +357,10 @@ class FirstRun extends Component {
function mapStateToProps (state) {
  const { hasAccounts } = state.personal;
+  const { isTest } = state.nodeStatus;

  return {
-    hasAccounts
+    hasAccounts, isTest
  };
}

View File

@@ -35,6 +35,9 @@ function createRedux () {
    return {
      personal: {
        hasAccounts: false
+      },
+      nodeStatus: {
+        isTest: false
      }
    };
  }

View File

@@ -98,7 +98,7 @@ export default class SecureApi extends Api {
    return {
      host,
-      port: parseInt(port, 10)
+      port: port ? parseInt(port, 10) : null
    };
  }
@@ -109,7 +109,9 @@ export default class SecureApi extends Api {
  get dappsUrl () {
    const { port } = this._dappsAddress;

-    return `${this.protocol()}//${this.hostname}:${port}`;
+    return port
+      ? `${this.protocol()}//${this.hostname}:${port}`
+      : `${this.protocol()}//${this.hostname}`;
  }

  get hostname () {
@@ -262,7 +264,7 @@ export default class SecureApi extends Api {
        .then(() => true);
      })
      .catch((error) => {
-        log.error('unkown error in _connect', error);
+        log.error('unknown error in _connect', error);

        return false;
      });
  }
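
The two SecureApi changes go together: when no dapps port string is present, port is stored as null (where parseInt alone would have produced NaN), and dappsUrl then omits the :port segment entirely. For illustration only, the same logic as a small Rust function (the real code is the JavaScript above):

// Omit the port segment entirely when no port is known.
fn dapps_url(protocol: &str, hostname: &str, port: Option<u16>) -> String {
    match port {
        Some(p) => format!("{}//{}:{}", protocol, hostname, p),
        None => format!("{}//{}", protocol, hostname),
    }
}

fn main() {
    assert_eq!(dapps_url("http:", "127.0.0.1", Some(8545)), "http://127.0.0.1:8545");
    assert_eq!(dapps_url("http:", "127.0.0.1", None), "http://127.0.0.1");
}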

View File

@@ -40,6 +40,9 @@ pub struct AuthorityRoundParams {
	/// Whether transitions should be immediate.
	#[serde(rename="immediateTransitions")]
	pub immediate_transitions: Option<bool>,
+	/// Reward per block in wei.
+	#[serde(rename="blockReward")]
+	pub block_reward: Option<Uint>,
}

/// Authority engine deserialization.
@@ -67,7 +70,8 @@ mod tests {
				"list" : ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
			},
			"startStep" : 24,
-			"validateStepTransition": 150
+			"validateStepTransition": 150,
+			"blockReward": 5000000
		}
	}"#;
@@ -76,5 +80,6 @@ mod tests {
		assert_eq!(deserialized.params.validators, ValidatorSet::List(vec![Address(H160::from("0xc6d9d2cd449a754c494264e1809c50e34d64562b"))]));
		assert_eq!(deserialized.params.start_step, Some(Uint(U256::from(24))));
		assert_eq!(deserialized.params.immediate_transitions, None);
	}
}

View File

@@ -16,14 +16,14 @@
//! Engine deserialization.

-use super::{Ethash, BasicAuthority, AuthorityRound, Tendermint};
+use super::{Ethash, BasicAuthority, AuthorityRound, Tendermint, NullEngine};

/// Engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub enum Engine {
	/// Null engine.
	#[serde(rename="null")]
-	Null,
+	Null(NullEngine),
	/// Instantly sealing engine.
	#[serde(rename="instantSeal")]
	InstantSeal,
@@ -48,11 +48,18 @@ mod tests {
	#[test]
	fn engine_deserialization() {
		let s = r#"{
-			"null": null
+			"null": {
+				"params": {
+					"blockReward": "0x0d"
+				}
+			}
		}"#;

		let deserialized: Engine = serde_json::from_str(s).unwrap();
-		assert_eq!(Engine::Null, deserialized);
+		match deserialized {
+			Engine::Null(_) => {}, // unit test in its own file.
+			_ => panic!(),
+		}

		let s = r#"{
			"instantSeal": null
@@ -61,7 +68,7 @@ mod tests {
		let deserialized: Engine = serde_json::from_str(s).unwrap();
		match deserialized {
			Engine::InstantSeal => {}, // instant seal is unit tested in its own file.
-			_ => assert!(false),
+			_ => panic!(),
		};

		let s = r#"{
@@ -82,7 +89,7 @@ mod tests {
		let deserialized: Engine = serde_json::from_str(s).unwrap();
		match deserialized {
			Engine::Ethash(_) => {}, // ethash is unit tested in its own file.
-			_ => assert!(false),
+			_ => panic!(),
		};

		let s = r#"{
@@ -98,7 +105,7 @@ mod tests {
		let deserialized: Engine = serde_json::from_str(s).unwrap();
		match deserialized {
			Engine::BasicAuthority(_) => {}, // basicAuthority is unit tested in its own file.
-			_ => assert!(false),
+			_ => panic!(),
		};

		let s = r#"{
@@ -116,7 +123,7 @@ mod tests {
		let deserialized: Engine = serde_json::from_str(s).unwrap();
		match deserialized {
			Engine::AuthorityRound(_) => {}, // AuthorityRound is unit tested in its own file.
-			_ => assert!(false),
+			_ => panic!(),
		};

		let s = r#"{
@@ -131,7 +138,7 @@ mod tests {
		let deserialized: Engine = serde_json::from_str(s).unwrap();
		match deserialized {
			Engine::Tendermint(_) => {}, // Tendermint is unit tested in its own file.
-			_ => assert!(false),
+			_ => panic!(),
		};
	}
}
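
The spec syntax changes because Null is now a data-carrying variant: with serde's externally tagged enum representation, "null" must map to an object holding the engine's params rather than to JSON null. A self-contained sketch of that behaviour with simplified stand-in types (a String reward instead of the crate's Uint; serde_derive and serde_json as dependencies):

// Externally tagged enum whose "null" variant carries a payload, so the JSON
// value under "null" must be an object, not null.
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

#[derive(Debug, Deserialize)]
struct NullEngineParams {
    #[serde(rename = "blockReward")]
    block_reward: Option<String>, // the real crate uses a Uint wrapper here
}

#[derive(Debug, Deserialize)]
struct NullEngine {
    params: NullEngineParams,
}

#[derive(Debug, Deserialize)]
enum Engine {
    #[serde(rename = "null")]
    Null(NullEngine),
}

fn main() {
    let s = r#"{ "null": { "params": { "blockReward": "0x0d" } } }"#;
    let engine: Engine = serde_json::from_str(s).unwrap();
    match engine {
        Engine::Null(ref e) => {
            assert_eq!(e.params.block_reward, Some("0x0d".to_string()));
        }
    }
}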

Some files were not shown because too many files have changed in this diff.