Merge branch 'signer-wsnotification' into signer-ui

Tomasz Drwięga 2016-06-01 20:13:36 +02:00
commit 89deaef809
38 changed files with 1254 additions and 719 deletions


@ -38,11 +38,16 @@ pub fn utils() -> Box<Endpoint> {
pub fn all_endpoints() -> Endpoints {
let mut pages = Endpoints::new();
pages.insert("proxy".to_owned(), ProxyPac::boxed());
pages.insert("proxy".into(), ProxyPac::boxed());
// Home page needs to be safe to embed
// because we use Cross-Origin LocalStorage.
// TODO [ToDr] Account naming should be moved to parity.
pages.insert("home".into(), Box::new(
PageEndpoint::new_safe_to_embed(parity_dapps_builtins::App::default())
));
insert::<parity_dapps_status::App>(&mut pages, "status");
insert::<parity_dapps_status::App>(&mut pages, "parity");
insert::<parity_dapps_builtins::App>(&mut pages, "home");
wallet_page(&mut pages);
daodapp_page(&mut pages);


@ -21,7 +21,7 @@ use hyper::{header, server, Decoder, Encoder, Next};
use hyper::net::HttpStream;
use std::io::Write;
use std::collections::HashMap;
use std::collections::BTreeMap;
#[derive(Debug, PartialEq, Default, Clone)]
pub struct EndpointPath {
@ -45,7 +45,7 @@ pub trait Endpoint : Send + Sync {
fn to_handler(&self, path: EndpointPath) -> Box<server::Handler<HttpStream>>;
}
pub type Endpoints = HashMap<String, Box<Endpoint>>;
pub type Endpoints = BTreeMap<String, Box<Endpoint>>;
pub type Handler = server::Handler<HttpStream>;
pub struct ContentHandler {


@ -30,6 +30,8 @@ pub struct PageEndpoint<T : WebApp + 'static> {
pub app: Arc<T>,
/// Prefix to strip from the path (when `None`, deduced from `app_id`)
pub prefix: Option<String>,
/// Safe to be loaded in a frame by another origin (use wisely!)
safe_to_embed: bool,
}
impl<T: WebApp + 'static> PageEndpoint<T> {
@ -37,6 +39,7 @@ impl<T: WebApp + 'static> PageEndpoint<T> {
PageEndpoint {
app: Arc::new(app),
prefix: None,
safe_to_embed: false,
}
}
@ -44,6 +47,18 @@ impl<T: WebApp + 'static> PageEndpoint<T> {
PageEndpoint {
app: Arc::new(app),
prefix: Some(prefix),
safe_to_embed: false,
}
}
/// Creates a new `PageEndpoint` that can be safely embedded in an iframe,
/// even from a different origin. This can be dangerous (clickjacking).
/// Use wisely!
pub fn new_safe_to_embed(app: T) -> Self {
PageEndpoint {
app: Arc::new(app),
prefix: None,
safe_to_embed: true,
}
}
}
@ -61,6 +76,7 @@ impl<T: WebApp> Endpoint for PageEndpoint<T> {
path: path,
file: None,
write_pos: 0,
safe_to_embed: self.safe_to_embed,
})
}
}
@ -83,6 +99,7 @@ struct PageHandler<T: WebApp + 'static> {
path: EndpointPath,
file: Option<String>,
write_pos: usize,
safe_to_embed: bool,
}
impl<T: WebApp + 'static> PageHandler<T> {
@ -128,6 +145,9 @@ impl<T: WebApp + 'static> server::Handler<HttpStream> for PageHandler<T> {
if let Some(f) = self.file.as_ref().and_then(|f| self.app.file(f)) {
res.set_status(StatusCode::Ok);
res.headers_mut().set(header::ContentType(f.content_type.parse().unwrap()));
if !self.safe_to_embed {
res.headers_mut().set_raw("X-Frame-Options", vec![b"SAMEORIGIN".to_vec()]);
}
Next::write()
} else {
res.set_status(StatusCode::NotFound);
@ -192,6 +212,7 @@ fn should_extract_path_with_appid() {
},
file: None,
write_pos: 0,
safe_to_embed: true,
};
// when

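A minimal, self-contained sketch (not part of the diff) of the embedding policy introduced above: pages answer with `X-Frame-Options: SAMEORIGIN` by default, and only endpoints built via `new_safe_to_embed` omit the header. The types below are simplified stand-ins for `PageEndpoint`/`PageHandler`, for illustration only.

// Simplified stand-in for the PageEndpoint/PageHandler pair in the diff above.
struct Page {
    safe_to_embed: bool,
}

impl Page {
    fn new() -> Self { Page { safe_to_embed: false } }
    fn new_safe_to_embed() -> Self { Page { safe_to_embed: true } }

    // Mirrors the response logic: pages that are not safe to embed get X-Frame-Options.
    fn frame_options(&self) -> Option<(&'static str, &'static str)> {
        if self.safe_to_embed { None } else { Some(("X-Frame-Options", "SAMEORIGIN")) }
    }
}

fn main() {
    assert_eq!(Page::new().frame_options(), Some(("X-Frame-Options", "SAMEORIGIN")));
    assert_eq!(Page::new_safe_to_embed().frame_options(), None);
}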

@ -8,17 +8,19 @@ authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"
[build-dependencies]
syntex = "*"
syntex = "0.32"
ethcore-ipc-codegen = { path = "../ipc/codegen" }
[dependencies]
ethcore-util = { path = "../util" }
clippy = { version = "0.0.67", optional = true}
ethcore-devtools = { path = "../devtools" }
ethcore-ipc = { path = "../ipc/rpc" }
rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }
semver = "0.2"
ethcore-ipc-nano = { path = "../ipc/nano" }
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
crossbeam = "0.2"
ethcore-util = { path = "../util" }
[features]
dev = ["clippy"]


@ -19,14 +19,12 @@
use traits::*;
use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBIterator,
IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction};
use std::collections::BTreeMap;
use std::sync::{RwLock};
use std::sync::{RwLock, Arc};
use std::convert::From;
use ipc::IpcConfig;
use std::ops::*;
use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::VecDeque;
use std::collections::{VecDeque, HashMap, BTreeMap};
impl From<String> for Error {
fn from(s: String) -> Error {
@ -34,20 +32,136 @@ impl From<String> for Error {
}
}
enum WriteCacheEntry {
Remove,
Write(Vec<u8>),
}
pub struct WriteCache {
entries: HashMap<Vec<u8>, WriteCacheEntry>,
preferred_len: usize,
}
const FLUSH_BATCH_SIZE: usize = 4096;
impl WriteCache {
fn new(cache_len: usize) -> WriteCache {
WriteCache {
entries: HashMap::new(),
preferred_len: cache_len,
}
}
fn write(&mut self, key: Vec<u8>, val: Vec<u8>) {
self.entries.insert(key, WriteCacheEntry::Write(val));
}
fn remove(&mut self, key: Vec<u8>) {
self.entries.insert(key, WriteCacheEntry::Remove);
}
fn get(&self, key: &Vec<u8>) -> Option<Vec<u8>> {
self.entries.get(key).and_then(
|vec_ref| match vec_ref {
&WriteCacheEntry::Write(ref val) => Some(val.clone()),
&WriteCacheEntry::Remove => None
})
}
/// The `WriteCache` should be locked while calling this
fn flush(&mut self, db: &DB, amount: usize) -> Result<(), Error> {
let batch = WriteBatch::new();
let mut removed_so_far = 0;
while removed_so_far < amount {
if self.entries.len() == 0 { break; }
let removed_key = {
let (key, cache_entry) = self.entries.iter().nth(0)
.expect("if entries.len == 0, we should have break in the loop, still we got here somehow");
match *cache_entry {
WriteCacheEntry::Write(ref val) => {
try!(batch.put(&key, val));
},
WriteCacheEntry::Remove => {
try!(batch.delete(&key));
},
}
key.clone()
};
self.entries.remove(&removed_key);
removed_so_far = removed_so_far + 1;
}
if removed_so_far > 0 {
try!(db.write(batch));
}
Ok(())
}
/// Flushes until the cache is empty
fn flush_all(&mut self, db: &DB) -> Result<(), Error> {
while !self.is_empty() { try!(self.flush(db, FLUSH_BATCH_SIZE)); }
Ok(())
}
fn is_empty(&self) -> bool {
self.entries.is_empty()
}
fn try_shrink(&mut self, db: &DB) -> Result<(), Error> {
if self.entries.len() > self.preferred_len {
try!(self.flush(db, FLUSH_BATCH_SIZE));
}
Ok(())
}
}
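The cache above batches writes before they hit RocksDB: the most recent `write` or `remove` for a key wins, removals are recorded as tombstone entries, and `get` only answers for keys still pending in the cache. A short test-style sketch of those semantics, added here for illustration (it assumes only the `WriteCache` definition above):

#[cfg(test)]
mod write_cache_semantics {
    use super::WriteCache;

    #[test]
    fn latest_entry_wins() {
        let mut cache = WriteCache::new(4);
        // A write followed by a remove leaves a tombstone: get() sees nothing.
        cache.write(b"key".to_vec(), b"1".to_vec());
        cache.remove(b"key".to_vec());
        assert_eq!(cache.get(&b"key".to_vec()), None);

        // Writing again replaces the tombstone.
        cache.write(b"key".to_vec(), b"2".to_vec());
        assert_eq!(cache.get(&b"key".to_vec()), Some(b"2".to_vec()));
        assert!(!cache.is_empty());
    }
}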
pub struct Database {
db: RwLock<Option<DB>>,
transactions: RwLock<BTreeMap<TransactionHandle, WriteBatch>>,
/// Iterators - don't use between threads!
iterators: RwLock<BTreeMap<IteratorHandle, DBIterator>>,
write_cache: RwLock<WriteCache>,
}
unsafe impl Send for Database {}
unsafe impl Sync for Database {}
impl Database {
pub fn new() -> Database {
Database {
db: RwLock::new(None),
transactions: RwLock::new(BTreeMap::new()),
iterators: RwLock::new(BTreeMap::new()),
write_cache: RwLock::new(WriteCache::new(DEFAULT_CACHE_LEN)),
}
}
pub fn flush(&self) -> Result<(), Error> {
let mut cache_lock = self.write_cache.write().unwrap();
let db_lock = self.db.read().unwrap();
if db_lock.is_none() { return Ok(()); }
let db = db_lock.as_ref().unwrap();
try!(cache_lock.try_shrink(&db));
Ok(())
}
pub fn flush_all(&self) -> Result<(), Error> {
let mut cache_lock = self.write_cache.write().unwrap();
let db_lock = self.db.read().unwrap();
if db_lock.is_none() { return Ok(()); }
let db = db_lock.as_ref().expect("we should have exited with Ok(()) on the previous step");
try!(cache_lock.flush_all(&db));
Ok(())
}
}
impl Drop for Database {
fn drop(&mut self) {
self.flush().unwrap();
}
}
#[derive(Ipc)]
@ -72,51 +186,64 @@ impl DatabaseService for Database {
Ok(())
}
/// Opens database in the specified path with the default config
fn open_default(&self, path: String) -> Result<(), Error> {
self.open(DatabaseConfig::default(), path)
}
fn close(&self) -> Result<(), Error> {
try!(self.flush_all());
let mut db = self.db.write().unwrap();
if db.is_none() { return Err(Error::IsClosed); }
// TODO: wait for transactions to expire/close here?
if self.transactions.read().unwrap().len() > 0 { return Err(Error::UncommitedTransactions); }
*db = None;
Ok(())
}
fn put(&self, key: &[u8], value: &[u8]) -> Result<(), Error> {
let db_lock = self.db.read().unwrap();
let db = try!(db_lock.as_ref().ok_or(Error::IsClosed));
try!(db.put(key, value));
let mut cache_lock = self.write_cache.write().unwrap();
cache_lock.write(key.to_vec(), value.to_vec());
Ok(())
}
fn delete(&self, key: &[u8]) -> Result<(), Error> {
let db_lock = self.db.read().unwrap();
let db = try!(db_lock.as_ref().ok_or(Error::IsClosed));
try!(db.delete(key));
let mut cache_lock = self.write_cache.write().unwrap();
cache_lock.remove(key.to_vec());
Ok(())
}
fn write(&self, handle: TransactionHandle) -> Result<(), Error> {
let db_lock = self.db.read().unwrap();
let db = try!(db_lock.as_ref().ok_or(Error::IsClosed));
fn write(&self, transaction: DBTransaction) -> Result<(), Error> {
let mut cache_lock = self.write_cache.write().unwrap();
let mut transactions = self.transactions.write().unwrap();
let batch = try!(
transactions.remove(&handle).ok_or(Error::TransactionUnknown)
);
try!(db.write(batch));
let mut writes = transaction.writes.borrow_mut();
for kv in writes.drain(..) {
cache_lock.write(kv.key, kv.value);
}
let mut removes = transaction.removes.borrow_mut();
for k in removes.drain(..) {
cache_lock.remove(k);
}
Ok(())
}
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
{
let key_vec = key.to_vec();
let cache_hit = self.write_cache.read().unwrap().get(&key_vec);
if cache_hit.is_some() {
return Ok(Some(cache_hit.expect("cache_hit.is_some() is true, yet the value is missing")))
}
}
let db_lock = self.db.read().unwrap();
let db = try!(db_lock.as_ref().ok_or(Error::IsClosed));
match try!(db.get(key)) {
Some(db_vec) => Ok(Some(db_vec.to_vec())),
Some(db_vec) => {
Ok(Some(db_vec.to_vec()))
},
None => Ok(None),
}
}
@ -166,37 +293,35 @@ impl DatabaseService for Database {
})
}
fn transaction_put(&self, transaction: TransactionHandle, key: &[u8], value: &[u8]) -> Result<(), Error>
{
let mut transactions = self.transactions.write().unwrap();
let batch = try!(
transactions.get_mut(&transaction).ok_or(Error::TransactionUnknown)
);
try!(batch.put(&key, &value));
fn dispose_iter(&self, handle: IteratorHandle) -> Result<(), Error> {
let mut iterators = self.iterators.write().unwrap();
iterators.remove(&handle);
Ok(())
}
fn transaction_delete(&self, transaction: TransactionHandle, key: &[u8]) -> Result<(), Error> {
let mut transactions = self.transactions.write().unwrap();
let batch = try!(
transactions.get_mut(&transaction).ok_or(Error::TransactionUnknown)
);
try!(batch.delete(&key));
Ok(())
}
fn new_transaction(&self) -> TransactionHandle {
let mut transactions = self.transactions.write().unwrap();
let next_transaction = transactions.keys().last().unwrap_or(&0) + 1;
transactions.insert(next_transaction, WriteBatch::new());
next_transaction
}
}
// TODO : put proper at compile-time
impl IpcConfig for Database {}
/// Database iterator
pub struct DatabaseIterator {
client: Arc<DatabaseClient<::nanomsg::Socket>>,
handle: IteratorHandle,
}
impl Iterator for DatabaseIterator {
type Item = (Vec<u8>, Vec<u8>);
fn next(&mut self) -> Option<Self::Item> {
self.client.iter_next(self.handle).and_then(|kv| Some((kv.key, kv.value)))
}
}
impl Drop for DatabaseIterator {
fn drop(&mut self) {
self.client.dispose_iter(self.handle).unwrap();
}
}
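`DatabaseIterator` pairs a server-side iterator handle with client-side RAII: `next` forwards to `iter_next`, and `Drop` releases the handle through the new `dispose_iter` call. A self-contained sketch of the same pattern, with a toy in-memory service standing in for the IPC client (all names below are invented for illustration):

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

// Toy stand-in for the IPC DatabaseClient: owns per-handle iterator state.
struct ToyService {
    iterators: Mutex<BTreeMap<u32, Vec<(Vec<u8>, Vec<u8>)>>>,
}

struct ToyIterator {
    service: Arc<ToyService>,
    handle: u32,
}

impl Iterator for ToyIterator {
    type Item = (Vec<u8>, Vec<u8>);
    fn next(&mut self) -> Option<Self::Item> {
        // Like DatabaseIterator::next -> client.iter_next(handle).
        self.service.iterators.lock().unwrap().get_mut(&self.handle).and_then(|v| v.pop())
    }
}

impl Drop for ToyIterator {
    fn drop(&mut self) {
        // Like DatabaseIterator::drop -> client.dispose_iter(handle).
        self.service.iterators.lock().unwrap().remove(&self.handle);
    }
}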
#[cfg(test)]
mod test {
@ -215,7 +340,7 @@ mod test {
fn can_be_open_empty() {
let db = Database::new();
let path = RandomTempPath::create_dir();
db.open(DatabaseConfig { prefix_size: Some(8) }, path.as_str().to_owned()).unwrap();
db.open_default(path.as_str().to_owned()).unwrap();
assert!(db.is_empty().is_ok());
}
@ -224,9 +349,10 @@ mod test {
fn can_store_key() {
let db = Database::new();
let path = RandomTempPath::create_dir();
db.open(DatabaseConfig { prefix_size: None }, path.as_str().to_owned()).unwrap();
db.open_default(path.as_str().to_owned()).unwrap();
db.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
db.flush_all().unwrap();
assert!(!db.is_empty().unwrap());
}
@ -234,15 +360,37 @@ mod test {
fn can_retrieve() {
let db = Database::new();
let path = RandomTempPath::create_dir();
db.open(DatabaseConfig { prefix_size: None }, path.as_str().to_owned()).unwrap();
db.open_default(path.as_str().to_owned()).unwrap();
db.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
db.close().unwrap();
db.open(DatabaseConfig { prefix_size: None }, path.as_str().to_owned()).unwrap();
db.open_default(path.as_str().to_owned()).unwrap();
assert_eq!(db.get("xxx".as_bytes()).unwrap().unwrap(), "1".as_bytes().to_vec());
}
}
#[cfg(test)]
mod write_cache_tests {
use super::Database;
use traits::*;
use devtools::*;
#[test]
fn cache_write_flush() {
let db = Database::new();
let path = RandomTempPath::create_dir();
db.open_default(path.as_str().to_owned()).unwrap();
db.put("100500".as_bytes(), "1".as_bytes()).unwrap();
db.delete("100500".as_bytes()).unwrap();
db.flush_all().unwrap();
let val = db.get("100500".as_bytes()).unwrap();
assert!(val.is_none());
}
}
#[cfg(test)]
mod client_tests {
use super::{DatabaseClient, Database};
@ -251,6 +399,8 @@ mod client_tests {
use nanoipc;
use std::sync::Arc;
use std::sync::atomic::{Ordering, AtomicBool};
use crossbeam;
use run_worker;
fn init_worker(addr: &str) -> nanoipc::Worker<Database> {
let mut worker = nanoipc::Worker::<Database>::new(&Arc::new(Database::new()));
@ -304,7 +454,7 @@ mod client_tests {
while !worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<DatabaseClient<_>>(url).unwrap();
client.open(DatabaseConfig { prefix_size: Some(8) }, path.as_str().to_owned()).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
assert!(client.is_empty().unwrap());
worker_should_exit.store(true, Ordering::Relaxed);
}
@ -314,27 +464,16 @@ mod client_tests {
let url = "ipc:///tmp/parity-db-ipc-test-30.ipc";
let path = RandomTempPath::create_dir();
let worker_should_exit = Arc::new(AtomicBool::new(false));
let worker_is_ready = Arc::new(AtomicBool::new(false));
let c_worker_should_exit = worker_should_exit.clone();
let c_worker_is_ready = worker_is_ready.clone();
::std::thread::spawn(move || {
let mut worker = init_worker(url);
while !c_worker_should_exit.load(Ordering::Relaxed) {
worker.poll();
c_worker_is_ready.store(true, Ordering::Relaxed);
}
});
while !worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<DatabaseClient<_>>(url).unwrap();
client.open(DatabaseConfig { prefix_size: Some(8) }, path.as_str().to_owned()).unwrap();
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
client.close().unwrap();
worker_should_exit.store(true, Ordering::Relaxed);
stop.store(true, Ordering::Relaxed);
});
}
#[test]
@ -342,29 +481,93 @@ mod client_tests {
let url = "ipc:///tmp/parity-db-ipc-test-40.ipc";
let path = RandomTempPath::create_dir();
let worker_should_exit = Arc::new(AtomicBool::new(false));
let worker_is_ready = Arc::new(AtomicBool::new(false));
let c_worker_should_exit = worker_should_exit.clone();
let c_worker_is_ready = worker_is_ready.clone();
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
::std::thread::spawn(move || {
let mut worker = init_worker(url);
while !c_worker_should_exit.load(Ordering::Relaxed) {
worker.poll();
c_worker_is_ready.store(true, Ordering::Relaxed);
}
});
while !worker_is_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_duplex_client::<DatabaseClient<_>>(url).unwrap();
client.open(DatabaseConfig { prefix_size: Some(8) }, path.as_str().to_owned()).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
client.close().unwrap();
client.open(DatabaseConfig { prefix_size: Some(8) }, path.as_str().to_owned()).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
assert_eq!(client.get("xxx".as_bytes()).unwrap().unwrap(), "1".as_bytes().to_vec());
worker_should_exit.store(true, Ordering::Relaxed);
stop.store(true, Ordering::Relaxed);
});
}
#[test]
fn can_read_empty() {
let url = "ipc:///tmp/parity-db-ipc-test-45.ipc";
let path = RandomTempPath::create_dir();
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
assert!(client.get("xxx".as_bytes()).unwrap().is_none());
stop.store(true, Ordering::Relaxed);
});
}
#[test]
fn can_commit_client_transaction() {
let url = "ipc:///tmp/parity-db-ipc-test-60.ipc";
let path = RandomTempPath::create_dir();
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
let transaction = DBTransaction::new();
transaction.put("xxx".as_bytes(), "1".as_bytes());
client.write(transaction).unwrap();
client.close().unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
assert_eq!(client.get("xxx".as_bytes()).unwrap().unwrap(), "1".as_bytes().to_vec());
stop.store(true, Ordering::Relaxed);
});
}
#[test]
fn key_write_read_ipc() {
let url = "ipc:///tmp/parity-db-ipc-test-70.ipc";
let path = RandomTempPath::create_dir();
crossbeam::scope(|scope| {
let stop = StopGuard::new();
run_worker(&scope, stop.share(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
let mut batch = Vec::new();
for _ in 0..100 {
batch.push((random_str(256).as_bytes().to_vec(), random_str(256).as_bytes().to_vec()));
batch.push((random_str(256).as_bytes().to_vec(), random_str(2048).as_bytes().to_vec()));
batch.push((random_str(2048).as_bytes().to_vec(), random_str(2048).as_bytes().to_vec()));
batch.push((random_str(2048).as_bytes().to_vec(), random_str(256).as_bytes().to_vec()));
}
for &(ref k, ref v) in batch.iter() {
client.put(k, v).unwrap();
}
client.close().unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
for &(ref k, ref v) in batch.iter() {
assert_eq!(v, &client.get(k).unwrap().unwrap());
}
});
}
}


@ -19,7 +19,71 @@ extern crate rocksdb;
extern crate ethcore_devtools as devtools;
extern crate semver;
extern crate ethcore_ipc_nano as nanoipc;
extern crate nanomsg;
extern crate crossbeam;
extern crate ethcore_util as util;
pub mod database;
pub mod traits;
pub use traits::{DatabaseService, DBTransaction, Error};
pub use database::{Database, DatabaseClient, DatabaseIterator};
use std::sync::Arc;
use std::sync::atomic::*;
use std::path::PathBuf;
pub type DatabaseNanoClient = DatabaseClient<::nanomsg::Socket>;
pub type DatabaseConnection = nanoipc::GuardedSocket<DatabaseNanoClient>;
#[derive(Debug)]
pub enum ServiceError {
Io(std::io::Error),
Socket(nanoipc::SocketError),
}
impl std::convert::From<std::io::Error> for ServiceError {
fn from(io_error: std::io::Error) -> ServiceError { ServiceError::Io(io_error) }
}
impl std::convert::From<nanoipc::SocketError> for ServiceError {
fn from(socket_error: nanoipc::SocketError) -> ServiceError { ServiceError::Socket(socket_error) }
}
pub fn blocks_service_url(db_path: &str) -> Result<String, std::io::Error> {
let mut path = PathBuf::from(db_path);
try!(::std::fs::create_dir_all(db_path));
path.push("blocks.ipc");
Ok(format!("ipc://{}", path.to_str().unwrap()))
}
pub fn extras_service_url(db_path: &str) -> Result<String, ::std::io::Error> {
let mut path = PathBuf::from(db_path);
try!(::std::fs::create_dir_all(db_path));
path.push("extras.ipc");
Ok(format!("ipc://{}", path.to_str().unwrap()))
}
pub fn blocks_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(blocks_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url));
Ok(client)
}
pub fn extras_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(extras_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url));
Ok(client)
}
// for tests
pub fn run_worker(scope: &crossbeam::Scope, stop: Arc<AtomicBool>, socket_path: &str) {
let socket_path = socket_path.to_owned();
scope.spawn(move || {
let mut worker = nanoipc::Worker::new(&Arc::new(Database::new()));
worker.add_reqrep(&socket_path).unwrap();
while !stop.load(Ordering::Relaxed) {
worker.poll();
}
});
}
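A small usage sketch, assuming only the helpers defined above: each key-value store gets its own IPC socket under the database directory, and callers connect through the typed client alias. (Illustration only; a worker such as the one spawned by `run_worker` must be serving those sockets.)

// Illustration only: obtain IPC clients for both stores under one database directory.
pub fn open_databases(db_path: &str) -> Result<(DatabaseConnection, DatabaseConnection), ServiceError> {
    let blocks = try!(blocks_client(db_path));   // connects to ipc://<db_path>/blocks.ipc
    let extras = try!(extras_client(db_path));   // connects to ipc://<db_path>/extras.ipc
    Ok((blocks, extras))
}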


@ -1,13 +1,30 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore database trait
use ipc::BinaryConvertable;
use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::VecDeque;
use std::cell::RefCell;
pub type TransactionHandle = u32;
pub type IteratorHandle = u32;
pub const DEFAULT_CACHE_LEN: usize = 12288;
#[derive(Binary)]
pub struct KeyValue {
pub key: Vec<u8>,
@ -28,13 +45,36 @@ pub enum Error {
#[derive(Binary)]
pub struct DatabaseConfig {
/// Optional prefix size in bytes. Allows lookup by partial key.
pub prefix_size: Option<usize>
pub prefix_size: Option<usize>,
/// write cache length
pub cache: usize,
}
pub trait DatabaseService {
impl Default for DatabaseConfig {
fn default() -> DatabaseConfig {
DatabaseConfig {
prefix_size: None,
cache: DEFAULT_CACHE_LEN,
}
}
}
impl DatabaseConfig {
fn with_prefix(prefix: usize) -> DatabaseConfig {
DatabaseConfig {
prefix_size: Some(prefix),
cache: DEFAULT_CACHE_LEN,
}
}
}
pub trait DatabaseService : Sized {
/// Opens database in the specified path
fn open(&self, config: DatabaseConfig, path: String) -> Result<(), Error>;
/// Opens database in the specified path with the default config
fn open_default(&self, path: String) -> Result<(), Error>;
/// Closes database
fn close(&self) -> Result<(), Error>;
@ -44,18 +84,6 @@ pub trait DatabaseService {
/// Delete value by key.
fn delete(&self, key: &[u8]) -> Result<(), Error>;
/// Insert a key-value pair in the transaction. Any existing value will be overwritten.
fn transaction_put(&self, transaction: TransactionHandle, key: &[u8], value: &[u8]) -> Result<(), Error>;
/// Delete value by key using transaction
fn transaction_delete(&self, transaction: TransactionHandle, key: &[u8]) -> Result<(), Error>;
/// Commit transaction to database.
fn write(&self, tr: TransactionHandle) -> Result<(), Error>;
/// Initiate new transaction on database
fn new_transaction(&self) -> TransactionHandle;
/// Get value by key.
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error>;
@ -70,4 +98,35 @@ pub trait DatabaseService {
/// Next key-value for the given iterator
fn iter_next(&self, iterator: IteratorHandle) -> Option<KeyValue>;
/// Dispose iteration that is no longer needed
fn dispose_iter(&self, handle: IteratorHandle) -> Result<(), Error>;
/// Write client transaction
fn write(&self, transaction: DBTransaction) -> Result<(), Error>;
}
#[derive(Binary)]
pub struct DBTransaction {
pub writes: RefCell<Vec<KeyValue>>,
pub removes: RefCell<Vec<Vec<u8>>>,
}
impl DBTransaction {
pub fn new() -> DBTransaction {
DBTransaction {
writes: RefCell::new(Vec::new()),
removes: RefCell::new(Vec::new()),
}
}
pub fn put(&self, key: &[u8], value: &[u8]) {
let mut brw = self.writes.borrow_mut();
brw.push(KeyValue { key: key.to_vec(), value: value.to_vec() });
}
pub fn delete(&self, key: &[u8]) {
let mut brw = self.removes.borrow_mut();
brw.push(key.to_vec());
}
}
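A sketch of the intended call pattern for the new client-side transaction, assuming only the trait and types above (the helper name is made up; the flow mirrors the `can_commit_client_transaction` test): queue puts and deletes locally, then submit the whole batch through `DatabaseService::write`.

/// Illustration only: batch a few updates and submit them in one write call.
fn batch_update<S: DatabaseService>(db: &S) -> Result<(), Error> {
    let transaction = DBTransaction::new();
    transaction.put(b"key-1", b"value-1");  // queued in transaction.writes
    transaction.put(b"key-2", b"value-2");
    transaction.delete(b"stale-key");       // queued in transaction.removes
    db.write(transaction)                   // server drains the batch into its write cache
}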


@ -261,7 +261,7 @@ mod tests {
let mut db = MemoryDB::new();
let mut db = AccountDBMut::new(&mut db, &Address::new());
let rlp = {
let mut a = Account::new_contract(x!(69), x!(0));
let mut a = Account::new_contract(69.into(), 0.into());
a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64)));
a.commit_storage(&mut db);
a.init_code(vec![]);
@ -281,7 +281,7 @@ mod tests {
let mut db = AccountDBMut::new(&mut db, &Address::new());
let rlp = {
let mut a = Account::new_contract(x!(69), x!(0));
let mut a = Account::new_contract(69.into(), 0.into());
a.init_code(vec![0x55, 0x44, 0xffu8]);
a.commit_code(&mut db);
a.rlp()
@ -296,10 +296,10 @@ mod tests {
#[test]
fn commit_storage() {
let mut a = Account::new_contract(x!(69), x!(0));
let mut a = Account::new_contract(69.into(), 0.into());
let mut db = MemoryDB::new();
let mut db = AccountDBMut::new(&mut db, &Address::new());
a.set_storage(x!(0), x!(0x1234));
a.set_storage(0.into(), 0x1234.into());
assert_eq!(a.storage_root(), None);
a.commit_storage(&mut db);
assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
@ -307,21 +307,21 @@ mod tests {
#[test]
fn commit_remove_commit_storage() {
let mut a = Account::new_contract(x!(69), x!(0));
let mut a = Account::new_contract(69.into(), 0.into());
let mut db = MemoryDB::new();
let mut db = AccountDBMut::new(&mut db, &Address::new());
a.set_storage(x!(0), x!(0x1234));
a.set_storage(0.into(), 0x1234.into());
a.commit_storage(&mut db);
a.set_storage(x!(1), x!(0x1234));
a.set_storage(1.into(), 0x1234.into());
a.commit_storage(&mut db);
a.set_storage(x!(1), x!(0));
a.set_storage(1.into(), 0.into());
a.commit_storage(&mut db);
assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
}
#[test]
fn commit_code() {
let mut a = Account::new_contract(x!(69), x!(0));
let mut a = Account::new_contract(69.into(), 0.into());
let mut db = MemoryDB::new();
let mut db = AccountDBMut::new(&mut db, &Address::new());
a.init_code(vec![0x55, 0x44, 0xffu8]);

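From this point on, the diff mechanically replaces the old `x!(...)` conversion macro with the standard `From`/`Into` traits. A brief sketch of the equivalence, relying only on conversions the diff itself uses (the `util` import path and the function wrapper are for illustration):

use util::{U256, H256, Address};

fn conversion_examples() {
    let gas: U256 = 3141562.into();          // was x!(3141562)
    let author: Address = "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into();  // was x!("9cce...")
    let slot = H256::from(&U256::from(0x1234u64));  // storage values keep the explicit From form
    let _ = (gas, author, slot);
}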

@ -20,7 +20,7 @@ impl<'db> AccountDB<'db> {
pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> {
AccountDB {
db: db,
address: x!(address),
address: address.into(),
}
}
}
@ -67,7 +67,7 @@ impl<'db> AccountDBMut<'db> {
pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> {
AccountDBMut {
db: db,
address: x!(address),
address: address.into(),
}
}


@ -86,9 +86,9 @@ impl Engine for BasicAuthority {
let gas_limit = parent.gas_limit;
let bound_divisor = self.our_params.gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - x!(1))
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + x!(1))
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
};
header.note_dirty();
@ -211,12 +211,12 @@ mod tests {
let engine = new_test_authority().engine;
let schedule = engine.schedule(&EnvInfo {
number: 10000000,
author: x!(0),
author: 0.into(),
timestamp: 0,
difficulty: x!(0),
difficulty: 0.into(),
last_hashes: vec![],
gas_used: x!(0),
gas_limit: x!(0)
gas_used: 0.into(),
gas_limit: 0.into()
});
assert!(schedule.stack_limit > 0);
@ -278,7 +278,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, addr.clone(), x!(3141562), vec![]);
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, addr.clone(), 3141562.into(), vec![]);
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();


@ -469,7 +469,7 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
}
}
let mut b = OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, header.author().clone(), x!(3141562), header.extra_data().clone());
let mut b = OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, header.author().clone(), 3141562.into(), header.extra_data().clone());
b.set_difficulty(*header.difficulty());
b.set_gas_limit(*header.gas_limit());
b.set_timestamp(header.timestamp());
@ -514,7 +514,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]);
let b = b.close_and_lock();
let _ = b.seal(engine.deref(), vec![]);
}
@ -530,7 +530,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]).close_and_lock().seal(engine.deref(), vec![]).unwrap();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]).close_and_lock().seal(engine.deref(), vec![]).unwrap();
let orig_bytes = b.rlp_bytes();
let orig_db = b.drain();
@ -557,7 +557,7 @@ mod tests {
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let vm_factory = Default::default();
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]);
let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), 3141562.into(), vec![]);
let mut uncle1_header = Header::new();
uncle1_header.extra_data = b"uncle1".to_vec();
let mut uncle2_header = Header::new();


@ -311,17 +311,17 @@ impl BlockQueue {
let h = header.hash();
{
if self.processing.read().unwrap().contains(&h) {
return Err(x!(ImportError::AlreadyQueued));
return Err(ImportError::AlreadyQueued.into());
}
let mut bad = self.verification.bad.lock().unwrap();
if bad.contains(&h) {
return Err(x!(ImportError::KnownBad));
return Err(ImportError::KnownBad.into());
}
if bad.contains(&header.parent_hash) {
bad.insert(h.clone());
return Err(x!(ImportError::KnownBad));
return Err(ImportError::KnownBad.into());
}
}


@ -254,7 +254,7 @@ impl<V> Client<V> where V: Verifier {
/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
let max_blocks_to_import = 128;
let max_blocks_to_import = 64;
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
let mut invalid_blocks = HashSet::new();
@ -655,10 +655,10 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
{
let header = BlockView::new(&bytes).header_view();
if self.chain.is_known(&header.sha3()) {
return Err(x!(ImportError::AlreadyInChain));
return Err(ImportError::AlreadyInChain.into());
}
if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown {
return Err(x!(BlockError::UnknownParent(header.parent_hash())));
return Err(BlockError::UnknownParent(header.parent_hash()).into());
}
}
self.block_queue.import_block(bytes)


@ -47,10 +47,10 @@ impl Default for EnvInfo {
number: 0,
author: Address::new(),
timestamp: 0,
difficulty: x!(0),
gas_limit: x!(0),
difficulty: 0.into(),
gas_limit: 0.into(),
last_hashes: vec![],
gas_used: x!(0),
gas_used: 0.into(),
}
}
}
@ -92,15 +92,15 @@ mod tests {
assert_eq!(env_info.number, 1112339);
assert_eq!(env_info.author, Address::from_str("000000f00000000f000000000000f00000000f00").unwrap());
assert_eq!(env_info.gas_limit, x!(40000));
assert_eq!(env_info.difficulty, x!(50000));
assert_eq!(env_info.gas_used, x!(0));
assert_eq!(env_info.gas_limit, 40000.into());
assert_eq!(env_info.difficulty, 50000.into());
assert_eq!(env_info.gas_used, 0.into());
}
#[test]
fn it_can_be_created_as_default() {
let default_env_info = EnvInfo::default();
assert_eq!(default_env_info.difficulty, x!(0));
assert_eq!(default_env_info.difficulty, 0.into());
}
}


@ -111,9 +111,9 @@ impl Engine for Ethash {
let gas_limit = parent.gas_limit;
let bound_divisor = self.ethash_params.gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - x!(1))
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + x!(1) + (header.gas_used * x!(6) / x!(5)) / bound_divisor)
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into() + (header.gas_used * 6.into() / 5.into()) / bound_divisor)
}
};
header.note_dirty();
@ -255,12 +255,12 @@ impl Ethash {
/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
pub fn boundary_to_difficulty(boundary: &H256) -> U256 {
U256::from((U512::one() << 256) / x!(U256::from(boundary.as_slice())))
U256::from((U512::one() << 256) / U256::from(boundary.as_slice()).into())
}
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
pub fn difficulty_to_boundary(difficulty: &U256) -> H256 {
x!(U256::from((U512::one() << 256) / x!(difficulty)))
U256::from((U512::one() << 256) / difficulty.into()).into()
}
fn to_ethash(hash: H256) -> EH256 {
@ -308,7 +308,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]);
let b = b.close();
assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
}
@ -323,7 +323,7 @@ mod tests {
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, Address::zero(), 3141562.into(), vec![]);
let mut uncle = Header::new();
let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
uncle.author = uncle_author.clone();
@ -346,24 +346,24 @@ mod tests {
let engine = new_morden().engine;
let schedule = engine.schedule(&EnvInfo {
number: 10000000,
author: x!(0),
author: 0.into(),
timestamp: 0,
difficulty: x!(0),
difficulty: 0.into(),
last_hashes: vec![],
gas_used: x!(0),
gas_limit: x!(0)
gas_used: 0.into(),
gas_limit: 0.into()
});
assert!(schedule.stack_limit > 0);
let schedule = engine.schedule(&EnvInfo {
number: 100,
author: x!(0),
author: 0.into(),
timestamp: 0,
difficulty: x!(0),
difficulty: 0.into(),
last_hashes: vec![],
gas_used: x!(0),
gas_limit: x!(0)
gas_used: 0.into(),
gas_limit: 0.into()
});
assert!(!schedule.have_delegate_call);

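The gas-limit rule above keeps each block's limit within `gas_limit / bound_divisor` of its parent and nudges it toward `gas_floor_target`, with an extra usage-driven term once the target is reached. A toy numeric sketch with `u64` in place of `U256` (the divisor of 1024 is assumed for the example):

use std::cmp::{min, max};

// Toy version of the clamp in populate_from_parent above.
fn next_gas_limit(gas_limit: u64, gas_floor_target: u64, gas_used: u64, bound_divisor: u64) -> u64 {
    if gas_limit < gas_floor_target {
        min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1)
    } else {
        max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1 + (gas_used * 6 / 5) / bound_divisor)
    }
}

fn main() {
    // Below target: the limit may only grow by ~1/1024 of the parent's limit per block.
    assert_eq!(next_gas_limit(3_000_000, 4_700_000, 0, 1024), 3_002_928);
    // At target with a full parent block: the 6/5 usage term pushes the limit upward.
    assert_eq!(next_gas_limit(4_700_000, 4_700_000, 4_700_000, 1024), 4_700_919);
}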

@ -361,7 +361,7 @@ impl<'a> Executive<'a> {
let refunds_bound = sstore_refunds + suicide_refunds;
// real amount to refund
let gas_left_prerefund = match result { Ok(x) => x, _ => x!(0) };
let gas_left_prerefund = match result { Ok(x) => x, _ => 0.into() };
let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) / U256::from(2));
let gas_left = gas_left_prerefund + refunded;
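In other words, the refund is capped both by the accumulated SSTORE/suicide refunds and by half of the gas actually consumed. A worked example with hypothetical numbers (not taken from the diff):

fn refund_example() {
    use std::cmp;
    // 100_000 gas supplied, 40_000 left before refunds, 50_000 of refunds accrued.
    let (supplied, gas_left_prerefund, refunds_bound) = (100_000u64, 40_000u64, 50_000u64);
    let refunded = cmp::min(refunds_bound, (supplied - gas_left_prerefund) / 2); // min(50_000, 30_000) = 30_000
    let gas_left = gas_left_prerefund + refunded;                                // 70_000
    assert_eq!((refunded, gas_left), (30_000, 70_000));
}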
@ -588,10 +588,10 @@ mod tests {
let expected_trace = vec![ Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("cd1722f3947def4cf144679da39c4c32bdc35681"),
to: x!("b010143a42d5980c7e5ef0e4a4416dc098a4fed3"),
value: x!(100),
gas: x!(100000),
from: "cd1722f3947def4cf144679da39c4c32bdc35681".into(),
to: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(),
value: 100.into(),
gas: 100000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -601,9 +601,9 @@ mod tests {
subs: vec![Trace {
depth: 1,
action: trace::Action::Create(trace::Create {
from: x!("b010143a42d5980c7e5ef0e4a4416dc098a4fed3"),
value: x!(23),
gas: x!(67979),
from: "b010143a42d5980c7e5ef0e4a4416dc098a4fed3".into(),
value: 23.into(),
gas: 67979.into(),
init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85]
}),
result: trace::Res::Create(trace::CreateResult {
@ -642,7 +642,7 @@ mod tests {
params.origin = sender.clone();
params.gas = U256::from(100_000);
params.code = Some(code.clone());
params.value = ActionValue::Transfer(x!(100));
params.value = ActionValue::Transfer(100.into());
let mut state_result = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100));
@ -660,7 +660,7 @@ mod tests {
depth: 0,
action: trace::Action::Create(trace::Create {
from: params.sender,
value: x!(100),
value: 100.into(),
gas: params.gas,
init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85],
}),


@ -311,12 +311,12 @@ mod tests {
fn get_test_env_info() -> EnvInfo {
EnvInfo {
number: 100,
author: x!(0),
author: 0.into(),
timestamp: 0,
difficulty: x!(0),
difficulty: 0.into(),
last_hashes: vec![],
gas_used: x!(0),
gas_limit: x!(0)
gas_used: 0.into(),
gas_limit: 0.into()
}
}


@ -114,11 +114,11 @@ mod test {
#[test]
fn existence() {
let a = PodAccount{balance: x!(69), nonce: x!(0), code: vec![], storage: map![]};
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]};
assert_eq!(AccountDiff::diff_pod(Some(&a), Some(&a)), None);
assert_eq!(AccountDiff::diff_pod(None, Some(&a)), Some(AccountDiff{
balance: Diff::Born(x!(69)),
nonce: Diff::Born(x!(0)),
balance: Diff::Born(69.into()),
nonce: Diff::Born(0.into()),
code: Diff::Born(vec![]),
storage: map![],
}));
@ -126,11 +126,11 @@ mod test {
#[test]
fn basic() {
let a = PodAccount{balance: x!(69), nonce: x!(0), code: vec![], storage: map![]};
let b = PodAccount{balance: x!(42), nonce: x!(1), code: vec![], storage: map![]};
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: vec![], storage: map![]};
let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: vec![], storage: map![]};
assert_eq!(AccountDiff::diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Changed(x!(69), x!(42)),
nonce: Diff::Changed(x!(0), x!(1)),
balance: Diff::Changed(69.into(), 42.into()),
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Same,
storage: map![],
}));
@ -138,11 +138,11 @@ mod test {
#[test]
fn code() {
let a = PodAccount{balance: x!(0), nonce: x!(0), code: vec![], storage: map![]};
let b = PodAccount{balance: x!(0), nonce: x!(1), code: vec![0], storage: map![]};
let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: vec![], storage: map![]};
let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: vec![0], storage: map![]};
assert_eq!(AccountDiff::diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Changed(x!(0), x!(1)),
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Changed(vec![], vec![0]),
storage: map![],
}));
@ -151,27 +151,27 @@ mod test {
#[test]
fn storage() {
let a = PodAccount {
balance: x!(0),
nonce: x!(0),
balance: 0.into(),
nonce: 0.into(),
code: vec![],
storage: mapx![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
};
let b = PodAccount {
balance: x!(0),
nonce: x!(0),
balance: 0.into(),
nonce: 0.into(),
code: vec![],
storage: mapx![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
};
assert_eq!(AccountDiff::diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Same,
code: Diff::Same,
storage: map![
x!(2) => Diff::new(x!(2), x!(3)),
x!(3) => Diff::new(x!(3), x!(0)),
x!(4) => Diff::new(x!(4), x!(0)),
x!(7) => Diff::new(x!(0), x!(7)),
x!(9) => Diff::new(x!(0), x!(9))
2.into() => Diff::new(2.into(), 3.into()),
3.into() => Diff::new(3.into(), 0.into()),
4.into() => Diff::new(4.into(), 0.into()),
7.into() => Diff::new(0.into(), 7.into()),
9.into() => Diff::new(0.into(), 9.into())
],
}));
}


@ -214,7 +214,7 @@ impl State {
/// Initialise the code of account `a` so that it is `value` for `key`.
/// NOTE: Account should have been created with `new_contract`.
pub fn init_code(&mut self, a: &Address, code: Bytes) {
self.require_or_from(a, true, || Account::new_contract(x!(0), self.account_start_nonce), |_|{}).init_code(code);
self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{}).init_code(code);
}
/// Execute a given transaction.
@ -377,27 +377,27 @@ fn should_apply_create_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: x!(100),
value: 100.into(),
data: FromHex::from_hex("601080600c6000396000f3006000355415600957005b60203560003555").unwrap(),
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Create(trace::Create {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
value: x!(100),
gas: x!(77412),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
value: 100.into(),
gas: 77412.into(),
init: vec![96, 16, 128, 96, 12, 96, 0, 57, 96, 0, 243, 0, 96, 0, 53, 84, 21, 96, 9, 87, 0, 91, 96, 32, 53, 96, 0, 53, 85],
}),
result: trace::Res::Create(trace::CreateResult {
@ -438,27 +438,27 @@ fn should_trace_failed_create_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: x!(100),
value: 100.into(),
data: FromHex::from_hex("5b600056").unwrap(),
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Create(trace::Create {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
value: x!(100),
gas: x!(78792),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
value: 100.into(),
gas: 78792.into(),
init: vec![91, 96, 0, 86],
}),
result: trace::Res::FailedCreate,
@ -476,29 +476,29 @@ fn should_trace_call_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -519,28 +519,28 @@ fn should_trace_basic_call_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -561,15 +561,15 @@ fn should_trace_call_transaction_to_builtin() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = Spec::new_test().engine;
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0x1)),
value: x!(0),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0x1.into()),
value: 0.into(),
data: vec![],
}.sign(&"".sha3());
@ -579,10 +579,10 @@ fn should_trace_call_transaction_to_builtin() {
assert_eq!(result.trace, Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!("0000000000000000000000000000000000000001"),
value: x!(0),
gas: x!(79_000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: "0000000000000000000000000000000000000001".into(),
value: 0.into(),
gas: 79_000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -601,29 +601,29 @@ fn should_not_trace_subcall_transaction_to_builtin() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = Spec::new_test().engine;
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(0),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 0.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("600060006000600060006001610be0f1").unwrap());
state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap());
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(0),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 0.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -643,30 +643,30 @@ fn should_not_trace_callcode() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = Spec::new_test().engine;
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(0),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 0.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b611000f2").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap());
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(0),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 0.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -686,33 +686,33 @@ fn should_not_trace_delegatecall() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
info.number = 0x789b0;
let engine = Spec::new_test().engine;
println!("schedule.have_delegate_call: {:?}", engine.schedule(&info).have_delegate_call);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(0),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 0.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("6000600060006000600b618000f4").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap());
let vm_factory = Default::default();
let result = state.apply(&info, engine.deref(), &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(0),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 0.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -732,29 +732,29 @@ fn should_trace_failed_call_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::FailedCall,
@ -774,30 +774,30 @@ fn should_trace_call_with_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -807,10 +807,10 @@ fn should_trace_call_with_subcall_transaction() {
subs: vec![Trace {
depth: 1,
action: trace::Action::Call(trace::Call {
from: x!(0xa),
to: x!(0xb),
value: x!(0),
gas: x!(78934),
from: 0xa.into(),
to: 0xb.into(),
value: 0.into(),
gas: 78934.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -832,29 +832,29 @@ fn should_trace_call_with_basic_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006045600b6000f1").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -864,10 +864,10 @@ fn should_trace_call_with_basic_subcall_transaction() {
subs: vec![Trace {
depth: 1,
action: trace::Action::Call(trace::Call {
from: x!(0xa),
to: x!(0xb),
value: x!(69),
gas: x!(2300),
from: 0xa.into(),
to: 0xb.into(),
value: 69.into(),
gas: 2300.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult::default()),
@ -886,29 +886,29 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds.
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds.
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -929,30 +929,30 @@ fn should_trace_failed_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],//600480600b6000396000f35b600056
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -962,10 +962,10 @@ fn should_trace_failed_subcall_transaction() {
subs: vec![Trace {
depth: 1,
action: trace::Action::Call(trace::Call {
from: x!(0xa),
to: x!(0xb),
value: x!(0),
gas: x!(78934),
from: 0xa.into(),
to: 0xb.into(),
value: 0.into(),
gas: 78934.into(),
input: vec![],
}),
result: trace::Res::FailedCall,
@ -984,31 +984,31 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap());
state.init_code(&x!(0xc), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap());
state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -1018,10 +1018,10 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
subs: vec![Trace {
depth: 1,
action: trace::Action::Call(trace::Call {
from: x!(0xa),
to: x!(0xb),
value: x!(0),
gas: x!(78934),
from: 0xa.into(),
to: 0xb.into(),
value: 0.into(),
gas: 78934.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -1031,10 +1031,10 @@ fn should_trace_call_with_subcall_with_subcall_transaction() {
subs: vec![Trace {
depth: 2,
action: trace::Action::Call(trace::Call {
from: x!(0xb),
to: x!(0xc),
value: x!(0),
gas: x!(78868),
from: 0xb.into(),
to: 0xc.into(),
value: 0.into(),
gas: 78868.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -1057,31 +1057,31 @@ fn should_trace_failed_subcall_with_subcall_transaction() {
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default();
info.gas_limit = x!(1_000_000);
info.gas_limit = 1_000_000.into();
let engine = TestEngine::new(5);
let t = Transaction {
nonce: x!(0),
gas_price: x!(0),
gas: x!(100_000),
action: Action::Call(x!(0xa)),
value: x!(100),
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Call(0xa.into()),
value: 100.into(),
data: vec![],//600480600b6000396000f35b600056
}.sign(&"".sha3());
state.init_code(&x!(0xa), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&x!(0xb), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap());
state.init_code(&x!(0xc), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &x!(100));
state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap());
state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap());
state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap());
state.add_balance(t.sender().as_ref().unwrap(), &(100.into()));
let vm_factory = Default::default();
let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap();
let expected_trace = Some(Trace {
depth: 0,
action: trace::Action::Call(trace::Call {
from: x!("9cce34f7ab185c7aba1b7c8140d620b4bda941d6"),
to: x!(0xa),
value: x!(100),
gas: x!(79000),
from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
to: 0xa.into(),
value: 100.into(),
gas: 79000.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -1091,20 +1091,20 @@ fn should_trace_failed_subcall_with_subcall_transaction() {
subs: vec![Trace {
depth: 1,
action: trace::Action::Call(trace::Call {
from: x!(0xa),
to: x!(0xb),
value: x!(0),
gas: x!(78934),
from: 0xa.into(),
to: 0xb.into(),
value: 0.into(),
gas: 78934.into(),
input: vec![],
}),
result: trace::Res::FailedCall,
subs: vec![Trace {
depth: 2,
action: trace::Action::Call(trace::Call {
from: x!(0xb),
to: x!(0xc),
value: x!(0),
gas: x!(78868),
from: 0xb.into(),
to: 0xc.into(),
value: 0.into(),
gas: 78868.into(),
input: vec![],
}),
result: trace::Res::Call(trace::CallResult {
@ -1125,7 +1125,7 @@ fn code_from_database() {
let temp = RandomTempPath::new();
let (root, db) = {
let mut state = get_temp_state_in(temp.as_path());
state.require_or_from(&a, false, ||Account::new_contract(x!(42), x!(0)), |_|{});
state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{});
state.init_code(&a, vec![1, 2, 3]);
assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec()));
state.commit();
View File
@ -59,19 +59,19 @@ mod test {
#[test]
fn create_delete() {
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let a = PodState::from(map![ 1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]) ]);
assert_eq!(StateDiff::diff_pod(&a, &PodState::new()), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Died(x!(69)),
nonce: Diff::Died(x!(0)),
1.into() => AccountDiff{
balance: Diff::Died(69.into()),
nonce: Diff::Died(0.into()),
code: Diff::Died(vec![]),
storage: map![],
}
]));
assert_eq!(StateDiff::diff_pod(&PodState::new(), &a), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Born(x!(69)),
nonce: Diff::Born(x!(0)),
1.into() => AccountDiff{
balance: Diff::Born(69.into()),
nonce: Diff::Born(0.into()),
code: Diff::Born(vec![]),
storage: map![],
}
@ -80,23 +80,23 @@ mod test {
#[test]
fn create_delete_with_unchanged() {
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let a = PodState::from(map![ 1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]) ]);
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]),
2.into() => PodAccount::new(69.into(), 0.into(), vec![], map![])
]);
assert_eq!(StateDiff::diff_pod(&a, &b), StateDiff(map![
x!(2) => AccountDiff{
balance: Diff::Born(x!(69)),
nonce: Diff::Born(x!(0)),
2.into() => AccountDiff{
balance: Diff::Born(69.into()),
nonce: Diff::Born(0.into()),
code: Diff::Born(vec![]),
storage: map![],
}
]));
assert_eq!(StateDiff::diff_pod(&b, &a), StateDiff(map![
x!(2) => AccountDiff{
balance: Diff::Died(x!(69)),
nonce: Diff::Died(x!(0)),
2.into() => AccountDiff{
balance: Diff::Died(69.into()),
nonce: Diff::Died(0.into()),
code: Diff::Died(vec![]),
storage: map![],
}
@ -106,17 +106,17 @@ mod test {
#[test]
fn change_with_unchanged() {
let a = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]),
2.into() => PodAccount::new(69.into(), 0.into(), vec![], map![])
]);
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(1), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
1.into() => PodAccount::new(69.into(), 1.into(), vec![], map![]),
2.into() => PodAccount::new(69.into(), 0.into(), vec![], map![])
]);
assert_eq!(StateDiff::diff_pod(&a, &b), StateDiff(map![
x!(1) => AccountDiff{
1.into() => AccountDiff{
balance: Diff::Same,
nonce: Diff::Changed(x!(0), x!(1)),
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Same,
storage: map![],
}
View File
@ -74,7 +74,7 @@ mod tests {
topics: vec![],
data: vec![]
});
sub_state.sstore_clears_count = x!(5);
sub_state.sstore_clears_count = 5.into();
sub_state.suicides.insert(address_from_u64(10u64));
let mut sub_state_2 = Substate::new();
@ -84,11 +84,11 @@ mod tests {
topics: vec![],
data: vec![]
});
sub_state_2.sstore_clears_count = x!(7);
sub_state_2.sstore_clears_count = 7.into();
sub_state.accrue(sub_state_2);
assert_eq!(sub_state.contracts_created.len(), 2);
assert_eq!(sub_state.sstore_clears_count, x!(12));
assert_eq!(sub_state.sstore_clears_count, 12.into());
assert_eq!(sub_state.suicides.len(), 1);
}
}
View File
@ -115,7 +115,7 @@ fn can_collect_garbage() {
fn can_handle_long_fork() {
let client_result = generate_dummy_client(1200);
let client = client_result.reference();
for _ in 0..10 {
for _ in 0..20 {
client.import_verified_blocks(&IoChannel::disconnected());
}
assert_eq!(1200, client.chain_info().best_block_number);
@ -124,7 +124,7 @@ fn can_handle_long_fork() {
push_blocks_to_client(client, 49, 1201, 800);
push_blocks_to_client(client, 53, 1201, 600);
for _ in 0..20 {
for _ in 0..40 {
client.import_verified_blocks(&IoChannel::disconnected());
}
assert_eq!(2000, client.chain_info().best_block_number);
@ -136,7 +136,7 @@ fn can_mine() {
let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]);
let client = client_result.reference();
let b = client.prepare_sealing(Address::default(), x!(31415926), vec![], vec![]).0.unwrap();
let b = client.prepare_sealing(Address::default(), 31415926.into(), vec![], vec![]).0.unwrap();
assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3());
assert!(client.try_seal(b.lock(), vec![]).is_ok());
View File
@ -98,8 +98,8 @@ pub fn create_test_block(header: &Header) -> Bytes {
fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header {
let mut header = Header::new();
header.gas_limit = x!(0);
header.difficulty = x!(order * 100);
header.gas_limit = 0.into();
header.difficulty = (order * 100).into();
header.timestamp = (order * 10) as u64;
header.number = order as u64;
header.parent_hash = parent_hash;
@ -335,7 +335,7 @@ pub fn get_bad_state_dummy_block() -> Bytes {
block_header.timestamp = 40;
block_header.number = 1;
block_header.parent_hash = test_spec.genesis_header().hash();
block_header.state_root = x!(0xbad);
block_header.state_root = 0xbad.into();
create_test_block(&block_header)
}
View File
@ -105,10 +105,10 @@ pub struct LocalizedReceipt {
fn test_basic() {
let expected = ::rustc_serialize::hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap();
let r = Receipt::new(
x!("2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee"),
x!(0x40cae),
"2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(),
0x40cae.into(),
vec![LogEntry {
address: x!("dcf421d093428b096ca501a7cd1a740855a7976f"),
address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(),
topics: vec![],
data: vec![0u8; 32]
}]
View File
@ -150,10 +150,10 @@ pub trait MinerService : Send + Sync {
fn last_nonce(&self, address: &Address) -> Option<U256>;
/// Suggested gas price.
fn sensible_gas_price(&self) -> U256 { x!(20000000000u64) }
fn sensible_gas_price(&self) -> U256 { 20000000000u64.into() }
/// Suggested gas limit.
fn sensible_gas_limit(&self) -> U256 { x!(21000) }
fn sensible_gas_limit(&self) -> U256 { 21000.into() }
/// Latest account balance in pending state.
fn balance(&self, chain: &BlockChainClient, address: &Address) -> U256;
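The defaults above keep their previous values, now written with .into(): 20000000000 wei is 20 gwei, and 21000 is the intrinsic gas of a plain value transfer. A quick sanity sketch (illustrative only, not part of this commit):

// Unit sanity check for the default values above (illustrative only).
const WEI_PER_GWEI: u64 = 1_000_000_000;

fn main() {
    assert_eq!(20_000_000_000u64 / WEI_PER_GWEI, 20); // default gas price == 20 gwei
    let intrinsic_transfer_gas: u64 = 21_000;         // default gas limit: a bare value transfer
    println!("20 gwei, {} gas", intrinsic_transfer_gas);
}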
View File
@ -18,14 +18,14 @@ use rayon::prelude::*;
use std::sync::atomic::AtomicBool;
use util::*;
use util::keys::store::{AccountService, AccountProvider};
use util::keys::store::AccountProvider;
use ethcore::views::{BlockView, HeaderView};
use ethcore::client::{BlockChainClient, BlockID};
use ethcore::block::{ClosedBlock, IsBlock};
use ethcore::error::*;
use ethcore::client::{Executive, Executed, EnvInfo, TransactOptions};
use ethcore::transaction::SignedTransaction;
use ethcore::receipt::{Receipt};
use ethcore::receipt::Receipt;
use ethcore::spec::Spec;
use ethcore::engine::Engine;
use super::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionImportResult, TransactionOrigin};
@ -44,7 +44,7 @@ pub struct Miner {
extra_data: RwLock<Bytes>,
spec: Spec,
accounts: RwLock<Option<Arc<AccountService>>>, // TODO: this is horrible since AccountService already contains a single RwLock field. refactor.
accounts: Option<Arc<AccountProvider>>,
}
impl Default for Miner {
@ -58,7 +58,7 @@ impl Default for Miner {
gas_floor_target: RwLock::new(U256::zero()),
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: RwLock::new(None),
accounts: None,
spec: Spec::new_test(),
}
}
@ -76,13 +76,13 @@ impl Miner {
gas_floor_target: RwLock::new(U256::zero()),
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: RwLock::new(None),
accounts: None,
spec: spec,
})
}
/// Creates new instance of miner
pub fn with_accounts(force_sealing: bool, spec: Spec, accounts: Arc<AccountService>) -> Arc<Miner> {
pub fn with_accounts(force_sealing: bool, spec: Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
Arc::new(Miner {
transaction_queue: Mutex::new(TransactionQueue::new()),
force_sealing: force_sealing,
@ -92,7 +92,7 @@ impl Miner {
gas_floor_target: RwLock::new(U256::zero()),
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: RwLock::new(Some(accounts)),
accounts: Some(accounts),
spec: spec,
})
}
@ -137,7 +137,7 @@ impl Miner {
Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, .. })) => {
trace!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?}", hash);
// Exit early if the gas left is smaller than min_tx_gas
let min_tx_gas: U256 = x!(21000); // TODO: figure this out properly.
let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
if gas_limit - gas_used < min_tx_gas {
break;
}
@ -177,9 +177,8 @@ impl Miner {
if !block.transactions().is_empty() {
trace!(target: "miner", "prepare_sealing: block has transaction - attempting internal seal.");
// block with transactions - see if we can seal immediately.
let a = self.accounts.read().unwrap();
let s = self.engine().generate_seal(block.block(), match *a.deref() {
Some(ref x) => Some(x.deref() as &AccountProvider),
let s = self.engine().generate_seal(block.block(), match self.accounts {
Some(ref x) => Some(&**x),
None => None,
});
if let Some(seal) = s {
@ -337,11 +336,11 @@ impl MinerService for Miner {
fn sensible_gas_price(&self) -> U256 {
// 10% above our minimum.
*self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
*self.transaction_queue.lock().unwrap().minimal_gas_price() * 110.into() / 100.into()
}
fn sensible_gas_limit(&self) -> U256 {
*self.gas_floor_target.read().unwrap() / x!(5)
*self.gas_floor_target.read().unwrap() / 5.into()
}
fn transactions_limit(&self) -> usize {
View File
@ -76,13 +76,13 @@ API and Console Options:
interface. APIS is a comma-delimited list of API
names. Possible names are web3, eth, net, personal,
ethcore, traces.
[default: web3,eth,net,personal,ethcore,traces].
[default: web3,eth,net,personal,traces].
--ipc-off Disable JSON-RPC over IPC service.
--ipc-path PATH Specify custom path for JSON-RPC over IPC service
[default: $HOME/.parity/jsonrpc.ipc].
--ipc-apis APIS Specify custom API set available via JSON-RPC over
IPC [default: web3,eth,net,personal,ethcore].
IPC [default: web3,eth,net,personal,traces].
--dapps-off Disable the Dapps server (e.g. status page).
--dapps-port PORT Specify the port portion of the Dapps server
View File
@ -242,12 +242,14 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM> where
let res = match status.state {
SyncState::Idle => SyncStatus::None,
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead => {
let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number);
let info = SyncInfo {
starting_block: U256::from(status.start_block_number),
current_block: U256::from(take_weak!(self.client).chain_info().best_block_number),
current_block: current_block,
highest_block: U256::from(status.highest_block_number.unwrap_or(status.start_block_number))
};
match info.highest_block > info.starting_block + U256::from(6) {
match info.highest_block > info.current_block + U256::from(6) {
true => SyncStatus::Info(info),
false => SyncStatus::None,
}
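The comparison now uses the current block rather than the starting block, so eth_syncing reports progress only while the node is more than six blocks behind the highest block it knows about. A minimal sketch of the new check, with made-up block numbers (not taken from this commit):

// Minimal sketch of the updated heuristic; the block numbers are illustrative only.
use util::numbers::U256; // same import the RPC tests in this diff use

fn is_syncing(current_block: U256, highest_block: U256) -> bool {
    highest_block > current_block + U256::from(6)
}

// a node at block 1000 with a known head at 2500 reports syncing:
//   is_syncing(U256::from(1000u64), U256::from(2500u64)) == true
// a node within six blocks of the head does not:
//   is_syncing(U256::from(2495u64), U256::from(2500u64)) == false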
View File
@ -19,41 +19,125 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::str::FromStr;
use ethcore::client::{BlockChainClient, Client, ClientConfig};
use ethcore::spec::Genesis;
use ethcore::ids::BlockID;
use ethcore::client::{Client, BlockChainClient, ClientConfig};
use ethcore::spec::{Genesis, Spec};
use ethcore::block::Block;
use ethcore::views::BlockView;
use ethcore::ethereum;
use ethcore::transaction::{Transaction, Action};
use ethminer::{MinerService, ExternalMiner};
use ethminer::{Miner, MinerService, ExternalMiner};
use devtools::RandomTempPath;
use util::Hashable;
use util::io::IoChannel;
use util::hash::Address;
use util::numbers::{Uint, U256};
use util::hash::{Address, H256};
use util::numbers::U256;
use util::keys::{AccountProvider, TestAccount, TestAccountProvider};
use jsonrpc_core::IoHandler;
use ethjson::blockchain::BlockChain;
use v1::traits::eth::Eth;
use v1::impls::EthClient;
use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService};
use v1::traits::eth::{Eth, EthSigning};
use v1::impls::{EthClient, EthSigningUnsafeClient};
use v1::tests::helpers::{TestSyncProvider, Config};
fn account_provider() -> Arc<TestAccountProvider> {
let mut accounts = HashMap::new();
accounts.insert(Address::from(1), TestAccount::new("test"));
let ap = TestAccountProvider::new(accounts);
Arc::new(ap)
}
fn sync_provider() -> Arc<TestSyncProvider> {
Arc::new(TestSyncProvider::new(Config {
network_id: U256::from(3),
num_peers: 120,
}))
}
fn miner_service(spec: Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
Miner::with_accounts(true, spec, accounts)
}
fn make_spec(chain: &BlockChain) -> Spec {
let genesis = Genesis::from(chain.genesis());
let mut spec = ethereum::new_frontier_test();
let state = chain.pre_state.clone().into();
spec.set_genesis_state(state);
spec.overwrite_genesis_params(genesis);
assert!(spec.is_state_root_valid());
spec
}
struct EthTester {
_client: Arc<BlockChainClient>,
_miner: Arc<MinerService>,
client: Arc<Client>,
accounts: Arc<TestAccountProvider>,
handler: IoHandler,
}
impl EthTester {
fn from_chain(chain: &BlockChain) -> Self {
let tester = Self::from_spec_provider(|| make_spec(chain));
for b in &chain.blocks_rlp() {
if Block::is_good(&b) {
let _ = tester.client.import_block(b.clone());
tester.client.flush_queue();
tester.client.import_verified_blocks(&IoChannel::disconnected());
}
}
tester.client.flush_queue();
assert!(tester.client.chain_info().best_block_hash == chain.best_block.clone().into());
tester
}
fn from_spec_provider<F>(spec_provider: F) -> Self
where F: Fn() -> Spec {
let dir = RandomTempPath::new();
let client = Client::new(ClientConfig::default(), spec_provider(), dir.as_path(), IoChannel::disconnected()).unwrap();
let sync_provider = sync_provider();
let account_provider = account_provider();
let miner_service = miner_service(spec_provider(), account_provider.clone());
let external_miner = Arc::new(ExternalMiner::default());
let eth_client = EthClient::new(
&client,
&sync_provider,
&account_provider,
&miner_service,
&external_miner
);
let eth_sign = EthSigningUnsafeClient::new(
&client,
&account_provider,
&miner_service
);
let handler = IoHandler::new();
handler.add_delegate(eth_client.to_delegate());
handler.add_delegate(eth_sign.to_delegate());
EthTester {
_miner: miner_service,
client: client,
accounts: account_provider,
handler: handler,
}
}
}
#[test]
fn harness_works() {
let chain: BlockChain = extract_chain!("BlockchainTests/bcUncleTest");
chain_harness(chain, |_| {});
let _ = EthTester::from_chain(&chain);
}
#[test]
fn eth_get_balance() {
let chain = extract_chain!("BlockchainTests/bcWalletTest", "wallet2outOf3txs");
chain_harness(chain, |tester| {
let tester = EthTester::from_chain(&chain);
// final account state
let req_latest = r#"{
"jsonrpc": "2.0",
@ -74,13 +158,12 @@ fn eth_get_balance() {
let res_new_acc = r#"{"jsonrpc":"2.0","result":"0x00","id":3}"#.to_owned();
assert_eq!(tester.handler.handle_request(req_new_acc).unwrap(), res_new_acc);
});
}
#[test]
fn eth_block_number() {
let chain = extract_chain!("BlockchainTests/bcRPC_API_Test");
chain_harness(chain, |tester| {
let tester = EthTester::from_chain(&chain);
let req_number = r#"{
"jsonrpc": "2.0",
"method": "eth_blockNumber",
@ -90,16 +173,67 @@ fn eth_block_number() {
let res_number = r#"{"jsonrpc":"2.0","result":"0x20","id":1}"#.to_owned();
assert_eq!(tester.handler.handle_request(req_number).unwrap(), res_number);
});
}
#[cfg(test)]
// a frontier-like test with an expanded gas limit and a balance on a known account.
const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
"name": "Frontier (Test)",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
"frontierCompatibilityModeLimit": "0xffffffffffffffff"
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x50000",
"networkID" : "0x1"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x400000000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0x50000"
},
"accounts": {
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"faa34835af5c2ea724333018a515fbb7d5bc0b33": { "balance": "10000000000000", "nonce": "0" }
}
}
"#;
#[test]
fn eth_transaction_count() {
let chain = extract_chain!("BlockchainTests/bcRPC_API_Test");
chain_harness(chain, |tester| {
let address = tester.accounts.new_account("123").unwrap();
let secret = tester.accounts.account_secret(&address).unwrap();
use util::crypto::Secret;
let address = Address::from_str("faa34835af5c2ea724333018a515fbb7d5bc0b33").unwrap();
let secret = Secret::from_str("8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2").unwrap();
let tester = EthTester::from_spec_provider(|| Spec::load(TRANSACTION_COUNT_SPEC));
tester.accounts.accounts.write().unwrap().insert(address, TestAccount {
unlocked: false,
password: "123".into(),
secret: secret
});
let req_before = r#"{
"jsonrpc": "2.0",
@ -112,32 +246,21 @@ fn eth_transaction_count() {
assert_eq!(tester.handler.handle_request(&req_before).unwrap(), res_before);
let t = Transaction {
nonce: U256::zero(),
gas_price: U256::from(0x9184e72a000u64),
gas: U256::from(0x76c0),
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
value: U256::from(0x9184e72au64),
data: vec![]
}.sign(&secret);
let req_send_trans = r#"{
"jsonrpc": "2.0",
"method": "eth_sendTransaction",
"params": [{
"from": ""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"gas": "0x30000",
"gasPrice": "0x01",
"value": "0x9184e72a"
}],
"id": 16
}"#;
let res_send_trans = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":16}"#;
// dispatch the transaction.
assert_eq!(tester.handler.handle_request(&req_send_trans).unwrap(), res_send_trans);
tester.handler.handle_request(&req_send_trans).unwrap();
// we have submitted the transaction -- but this shouldn't be reflected in a "latest" query.
let req_after_latest = r#"{
@ -162,68 +285,76 @@ fn eth_transaction_count() {
let res_after_pending = r#"{"jsonrpc":"2.0","result":"0x01","id":18}"#;
assert_eq!(&tester.handler.handle_request(&req_after_pending).unwrap(), res_after_pending);
});
}
fn account_provider() -> Arc<TestAccountProvider> {
let mut accounts = HashMap::new();
accounts.insert(Address::from(1), TestAccount::new("test"));
let ap = TestAccountProvider::new(accounts);
Arc::new(ap)
fn verify_transaction_counts(name: String, chain: BlockChain) {
struct PanicHandler(String);
impl Drop for PanicHandler {
fn drop(&mut self) {
if ::std::thread::panicking() {
println!("Test failed: {}", self.0);
}
fn sync_provider() -> Arc<TestSyncProvider> {
Arc::new(TestSyncProvider::new(Config {
network_id: U256::from(3),
num_peers: 120,
}))
}
fn miner_service() -> Arc<TestMinerService> {
Arc::new(TestMinerService::default())
}
// given a blockchain, this harness will create an EthClient wrapping it
// which tests can pass specially crafted requests to.
fn chain_harness<F, U>(chain: BlockChain, mut cb: F) -> U
where F: FnMut(&EthTester) -> U {
let genesis = Genesis::from(chain.genesis());
let mut spec = ethereum::new_frontier_test();
let state = chain.pre_state.clone().into();
spec.set_genesis_state(state);
spec.overwrite_genesis_params(genesis);
assert!(spec.is_state_root_valid());
let dir = RandomTempPath::new();
let client = Client::new(ClientConfig::default(), spec, dir.as_path(), IoChannel::disconnected()).unwrap();
let sync_provider = sync_provider();
let miner_service = miner_service();
let account_provider = account_provider();
let external_miner = Arc::new(ExternalMiner::default());
for b in &chain.blocks_rlp() {
if Block::is_good(&b) {
let _ = client.import_block(b.clone());
client.flush_queue();
client.import_verified_blocks(&IoChannel::disconnected());
}
}
assert!(client.chain_info().best_block_hash == chain.best_block.into());
let _panic = PanicHandler(name);
let eth_client = EthClient::new(&client, &sync_provider, &account_provider,
&miner_service, &external_miner);
fn by_hash(hash: H256, count: usize, id: &mut usize) -> (String, String) {
let req = r#"{
"jsonrpc": "2.0",
"method": "eth_getBlockTransactionCountByHash",
"params": [
""#.to_owned() + format!("0x{:?}", hash).as_ref() + r#""
],
"id": "# + format!("{}", *id).as_ref() + r#"
}"#;
let handler = IoHandler::new();
let delegate = eth_client.to_delegate();
handler.add_delegate(delegate);
let tester = EthTester {
_miner: miner_service,
_client: client,
accounts: account_provider,
handler: handler,
};
cb(&tester)
let res = r#"{"jsonrpc":"2.0","result":""#.to_owned()
+ format!("0x{:02x}", count).as_ref()
+ r#"","id":"#
+ format!("{}", *id).as_ref() + r#"}"#;
*id += 1;
(req, res)
}
fn by_number(num: u64, count: usize, id: &mut usize) -> (String, String) {
let req = r#"{
"jsonrpc": "2.0",
"method": "eth_getBlockTransactionCountByNumber",
"params": [
"#.to_owned() + &::serde_json::to_string(&U256::from(num)).unwrap() + r#"
],
"id": "# + format!("{}", *id).as_ref() + r#"
}"#;
let res = r#"{"jsonrpc":"2.0","result":""#.to_owned()
+ format!("0x{:02x}", count).as_ref()
+ r#"","id":"#
+ format!("{}", *id).as_ref() + r#"}"#;
*id += 1;
(req, res)
}
let tester = EthTester::from_chain(&chain);
let mut id = 1;
for b in chain.blocks_rlp().iter().filter(|b| Block::is_good(b)).map(|b| BlockView::new(b)) {
let count = b.transactions_count();
let hash = b.sha3();
let number = b.header_view().number();
let (req, res) = by_hash(hash, count, &mut id);
assert_eq!(tester.handler.handle_request(&req), Some(res));
// uncles can share block numbers, so skip them.
if tester.client.block_hash(BlockID::Number(number)) == Some(hash) {
let (req, res) = by_number(number, count, &mut id);
assert_eq!(tester.handler.handle_request(&req), Some(res));
}
}
}
register_test!(eth_transaction_count_1, verify_transaction_counts, "BlockchainTests/bcWalletTest");
register_test!(eth_transaction_count_2, verify_transaction_counts, "BlockchainTests/bcTotalDifficultyTest");
register_test!(eth_transaction_count_3, verify_transaction_counts, "BlockchainTests/bcGasPricerTest");
View File
@ -110,6 +110,7 @@ fn rpc_eth_syncing() {
status.state = SyncState::Blocks;
status.highest_block_number = Some(2500);
// "sync" to 1000 blocks.
// causes TestBlockChainClient to return 1000 for its best block number.
let mut blocks = tester.client.blocks.write().unwrap();
for i in 0..1000 {
@ -119,6 +120,16 @@ fn rpc_eth_syncing() {
let true_res = r#"{"jsonrpc":"2.0","result":{"currentBlock":"0x03e8","highestBlock":"0x09c4","startingBlock":"0x00"},"id":1}"#;
assert_eq!(tester.io.handle_request(request), Some(true_res.to_owned()));
{
// finish "syncing"
let mut blocks = tester.client.blocks.write().unwrap();
for i in 0..1500 {
blocks.insert(H256::from(i + 1000), Vec::new());
}
}
assert_eq!(tester.io.handle_request(request), Some(false_res.to_owned()));
}
#[test]
View File
@ -13,11 +13,15 @@ pub mod helpers;
// extract the chain with that name. This will panic if no chain by that name
// is found.
macro_rules! extract_chain {
($file:expr, $name:expr) => {{
(iter $file:expr) => {{
const RAW_DATA: &'static [u8] =
include_bytes!(concat!("../../../../ethcore/res/ethereum/tests/", $file, ".json"));
::ethjson::blockchain::Test::load(RAW_DATA).unwrap().into_iter()
}};
($file:expr, $name:expr) => {{
let mut chain = None;
for (name, c) in ::ethjson::blockchain::Test::load(RAW_DATA).unwrap() {
for (name, c) in extract_chain!(iter $file) {
if name == $name {
chain = Some(c);
break;
@ -27,14 +31,41 @@ macro_rules! extract_chain {
}};
($file:expr) => {{
const RAW_DATA: &'static [u8] =
include_bytes!(concat!("../../../../ethcore/res/ethereum/tests/", $file, ".json"));
::ethjson::blockchain::Test::load(RAW_DATA)
.unwrap().into_iter().next().unwrap().1
extract_chain!(iter $file).next().unwrap().1
}};
}
macro_rules! register_test {
($name:ident, $cb:expr, $file:expr) => {
#[test]
fn $name() {
for (name, chain) in extract_chain!(iter $file) {
$cb(name, chain);
}
}
};
(heavy $name:ident, $cb:expr, $file:expr) => {
#[test]
#[cfg(feature = "test-heavy")]
fn $name() {
for (name, chain) in extract_chain!(iter $file) {
$cb(name, chain);
}
}
};
(ignore $name:ident, $cb:expr, $file:expr) => {
#[test]
#[ignore]
fn $name() {
for (name, chain) in extract_chain!(iter $file) {
$cb(name, chain);
}
}
};
}
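For reference, a rough sketch of what the eth_transaction_count_1 registration shown earlier in this diff expands to under this macro (hand-expanded, not compiler output):

// register_test!(eth_transaction_count_1, verify_transaction_counts, "BlockchainTests/bcWalletTest");
// expands, roughly, to:
#[test]
fn eth_transaction_count_1() {
    for (name, chain) in extract_chain!(iter "BlockchainTests/bcWalletTest") {
        verify_transaction_counts(name, chain);
    }
}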
#[cfg(test)]
mod mocked;
#[cfg(test)]
View File
@ -113,11 +113,12 @@ const MAX_HEADERS_TO_SEND: usize = 512;
const MAX_NODE_DATA_TO_SEND: usize = 1024;
const MAX_RECEIPTS_TO_SEND: usize = 1024;
const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
const MAX_HEADERS_TO_REQUEST: usize = 256;
const MAX_HEADERS_TO_REQUEST: usize = 128;
const MAX_BODIES_TO_REQUEST: usize = 64;
const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;
const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20;
const SUBCHAIN_SIZE: usize = 64;
const STATUS_PACKET: u8 = 0x00;
const NEW_BLOCK_HASHES_PACKET: u8 = 0x01;
@ -133,7 +134,7 @@ const NODE_DATA_PACKET: u8 = 0x0e;
const GET_RECEIPTS_PACKET: u8 = 0x0f;
const RECEIPTS_PACKET: u8 = 0x10;
const CONNECTION_TIMEOUT_SEC: f64 = 10f64;
const CONNECTION_TIMEOUT_SEC: f64 = 15f64;
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
/// Sync state
@ -639,7 +640,7 @@ impl ChainSync {
self.sync_peer(io, p, false);
}
}
if !self.peers.values().any(|p| p.asking != PeerAsking::Nothing) {
if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing) {
self.complete_sync();
}
}
@ -665,7 +666,7 @@ impl ChainSync {
return;
}
if self.state == SyncState::Waiting {
trace!(target: "sync", "Waiting for block queue");
trace!(target: "sync", "Waiting for the block queue");
return;
}
(peer.latest_hash.clone(), peer.difficulty.clone())
@ -689,7 +690,7 @@ impl ChainSync {
// Request subchain headers
trace!(target: "sync", "Starting sync with better chain");
let last = self.last_imported_hash.clone();
self.request_headers_by_hash(io, peer_id, &last, 128, 255, false, PeerAsking::Heads);
self.request_headers_by_hash(io, peer_id, &last, SUBCHAIN_SIZE, MAX_HEADERS_TO_REQUEST - 1, false, PeerAsking::Heads);
},
SyncState::Blocks | SyncState::NewBlocks => {
if io.chain().block_status(BlockID::Hash(peer_latest)) == BlockStatus::Unknown {
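The literal 128/255 arguments of the ChainHead request are replaced by the constants introduced above: SUBCHAIN_SIZE heads are requested with a skip of MAX_HEADERS_TO_REQUEST - 1, so consecutive heads sit MAX_HEADERS_TO_REQUEST blocks apart and one round spans roughly SUBCHAIN_SIZE * MAX_HEADERS_TO_REQUEST blocks. A back-of-the-envelope sketch (not part of this commit):

// Back-of-the-envelope sketch of the header-request spacing implied by the new constants.
const MAX_HEADERS_TO_REQUEST: usize = 128;
const SUBCHAIN_SIZE: usize = 64;

fn main() {
    let skip = MAX_HEADERS_TO_REQUEST - 1;             // 127 headers skipped between two subchain heads
    let span = SUBCHAIN_SIZE * MAX_HEADERS_TO_REQUEST; // about 8192 blocks covered per ChainHead round
    println!("skip = {}, span = {}", skip, span);
}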
@ -704,6 +705,8 @@ impl ChainSync {
fn start_sync_round(&mut self, io: &mut SyncIo) {
self.state = SyncState::ChainHead;
trace!(target: "sync", "Starting round (last imported count = {:?}, block = {:?}", self.imported_this_round, self.last_imported_block);
// Check whether we need to retract to find the common block. The problem is that peers still return headers by hash even
// from the non-canonical part of the tree, so we also retract if nothing was imported in the last round.
if self.imported_this_round.is_some() && self.imported_this_round.unwrap() == 0 && self.last_imported_block > 0 {
match io.chain().block_hash(BlockID::Number(self.last_imported_block - 1)) {
Some(h) => {
@ -781,9 +784,13 @@ impl ChainSync {
match io.chain().import_block(block) {
Err(Error::Import(ImportError::AlreadyInChain)) => {
self.last_imported_block = number;
self.last_imported_hash = h.clone();
trace!(target: "sync", "Block already in chain {:?}", h);
},
Err(Error::Import(ImportError::AlreadyQueued)) => {
self.last_imported_block = number;
self.last_imported_hash = h.clone();
trace!(target: "sync", "Block already queued {:?}", h);
},
Ok(_) => {
@ -856,22 +863,15 @@ impl ChainSync {
/// Generic request sender
fn send_request(&mut self, sync: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) {
{
let peer = self.peers.get_mut(&peer_id).unwrap();
if peer.asking != PeerAsking::Nothing {
warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);
}
}
match sync.send(peer_id, packet_id, packet) {
Err(e) => {
debug!(target:"sync", "Error sending request: {:?}", e);
sync.disable_peer(peer_id);
}
Ok(_) => {
let mut peer = self.peers.get_mut(&peer_id).unwrap();
peer.asking = asking;
peer.ask_time = time::precise_time_s();
}
if let Err(e) = sync.send(peer_id, packet_id, packet) {
debug!(target:"sync", "Error sending request: {:?}", e);
sync.disable_peer(peer_id);
}
}
@ -1099,6 +1099,7 @@ impl ChainSync {
let tick = time::precise_time_s();
for (peer_id, peer) in &self.peers {
if peer.asking != PeerAsking::Nothing && (tick - peer.ask_time) > CONNECTION_TIMEOUT_SEC {
trace!(target:"sync", "Timeouted {}", peer_id);
io.disconnect_peer(*peer_id);
}
}
@ -1164,24 +1165,23 @@ impl ChainSync {
.collect::<Vec<_>>()
}
fn select_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> Vec<(PeerId, BlockNumber)> {
use rand::Rng;
let mut lagging_peers = self.get_lagging_peers(chain_info, io);
// take sqrt(x) peers
let mut count = (self.peers.len() as f64).powf(0.5).round() as usize;
count = min(count, MAX_PEERS_PROPAGATION);
count = max(count, MIN_PEERS_PROPAGATION);
::rand::thread_rng().shuffle(&mut lagging_peers);
lagging_peers.into_iter().take(count).collect::<Vec<_>>()
}
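The fraction-based sampling that used to live in propagate_blocks is folded into select_lagging_peers: take roughly sqrt(peer_count) of the lagging peers, clamped between MIN_PEERS_PROPAGATION and MAX_PEERS_PROPAGATION, then shuffle. A standalone sketch of just the count rule (illustrative, not from this commit):

// Illustrative sketch of the clamped square-root rule used above.
use std::cmp::{max, min};

fn propagation_count(peer_count: usize) -> usize {
    const MIN_PEERS_PROPAGATION: usize = 4;
    const MAX_PEERS_PROPAGATION: usize = 128;
    let count = (peer_count as f64).powf(0.5).round() as usize;
    max(MIN_PEERS_PROPAGATION, min(count, MAX_PEERS_PROPAGATION))
}

// e.g. propagation_count(20) == 4 and propagation_count(10_000) == 100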
/// propagates latest block to lagging peers
fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
let updated_peers = {
let lagging_peers = self.get_lagging_peers(chain_info, io);
// sqrt(x)/x scaled to max u32
let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
let lucky_peers = match lagging_peers.len() {
0 ... MIN_PEERS_PROPAGATION => lagging_peers,
_ => lagging_peers.into_iter().filter(|_| ::rand::random::<u32>() < fraction).collect::<Vec<_>>()
};
// taking at max of MAX_PEERS_PROPAGATION
lucky_peers.iter().map(|&(id, _)| id.clone()).take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::<Vec<PeerId>>()
};
let lucky_peers = self.select_lagging_peers(chain_info, io);
let mut sent = 0;
for peer_id in updated_peers {
for (peer_id, _) in lucky_peers {
let rlp = ChainSync::create_latest_block_rlp(io.chain());
self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone();
@ -1193,12 +1193,12 @@ impl ChainSync {
/// propagates new known hashes to all peers
fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
let updated_peers = self.get_lagging_peers(chain_info, io);
let lucky_peers = self.select_lagging_peers(chain_info, io);
let mut sent = 0;
let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone())).unwrap()).parent_hash();
for (peer_id, peer_number) in updated_peers {
for (peer_id, peer_number) in lucky_peers {
let mut peer_best = self.peers.get(&peer_id).unwrap().latest_hash.clone();
if chain_info.best_block_number - peer_number > MAX_PEERS_PROPAGATION as BlockNumber {
if chain_info.best_block_number - peer_number > MAX_PEER_LAG_PROPAGATION as BlockNumber {
// If we think peer is too far behind just send one latest hash
peer_best = last_parent.clone();
}
@ -1259,15 +1259,15 @@ impl ChainSync {
}
fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
self.propagate_new_transactions(io);
let chain_info = io.chain().chain_info();
if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
let blocks = self.propagate_blocks(&chain_info, io);
let hashes = self.propagate_new_hashes(&chain_info, io);
let blocks = self.propagate_blocks(&chain_info, io);
if blocks != 0 || hashes != 0 {
trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
}
}
self.propagate_new_transactions(io);
self.last_sent_block_number = chain_info.best_block_number;
}
@ -1310,8 +1310,8 @@ mod tests {
fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes {
let mut header = Header::new();
header.gas_limit = x!(0);
header.difficulty = x!(order * 100);
header.gas_limit = 0.into();
header.difficulty = (order * 100).into();
header.timestamp = (order * 10) as u64;
header.number = order as u64;
header.parent_hash = parent_hash;
@ -1327,7 +1327,7 @@ mod tests {
fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes {
let mut rlp = RlpStream::new_list(1);
rlp.append_raw(&get_dummy_block(order, parent_hash), 1);
let difficulty: U256 = x!(100 * order);
let difficulty: U256 = (100 * order).into();
rlp.append(&difficulty);
rlp.out()
}
View File
@ -159,7 +159,7 @@ fn propagate_hashes() {
#[test]
fn propagate_blocks() {
let mut net = TestNet::new(2);
let mut net = TestNet::new(20);
net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle);
net.sync();
@ -169,7 +169,8 @@ fn propagate_blocks() {
assert!(!net.peer(0).queue.is_empty());
// NEW_BLOCK_PACKET
assert_eq!(0x07, net.peer(0).queue[0].packet_id);
let blocks = net.peer(0).queue.iter().filter(|p| p.packet_id == 0x7).count();
assert!(blocks > 0);
}
#[test]
View File
@ -26,44 +26,50 @@ pub use sha3::*;
#[macro_export]
macro_rules! hash_map {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ($x, $y) ),* ].into_iter().collect::<HashMap<_, _>>()
}
() => { HashMap::new() };
( $( $x:expr => $y:expr ),* ) => {{
let mut x = HashMap::new();
$(
x.insert($x, $y);
)*
x
}}
}
#[macro_export]
macro_rules! hash_mapx {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ( From::from($x), From::from($y) ) ),* ].into_iter().collect::<HashMap<_, _>>()
}
macro_rules! hash_map_into {
() => { HashMap::new() };
( $( $x:expr => $y:expr ),* ) => {{
let mut x = HashMap::new();
$(
x.insert($x.into(), $y.into());
)*
x
}}
}
#[macro_export]
macro_rules! map {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ($x, $y) ),* ].into_iter().collect::<BTreeMap<_, _>>()
}
() => { BTreeMap::new() };
( $( $x:expr => $y:expr ),* ) => {{
let mut x = BTreeMap::new();
$(
x.insert($x, $y);
)*
x
}}
}
#[macro_export]
macro_rules! mapx {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ( From::from($x), From::from($y) ) ),* ].into_iter().collect::<BTreeMap<_, _>>()
}
}
#[macro_export]
macro_rules! x {
( $x:expr ) => {
From::from($x)
}
}
#[macro_export]
macro_rules! xx {
( $x:expr ) => {
From::from(From::from($x))
}
macro_rules! map_into {
() => { BTreeMap::new() };
( $( $x:expr => $y:expr ),* ) => {{
let mut x = BTreeMap::new();
$(
x.insert($x.into(), $y.into());
)*
x
}}
}
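With the x!/xx! conversion macros removed, call sites now use .into() directly, and the new hash_map_into!/map_into! variants call .into() on every key and value. A minimal usage sketch, assuming util's U256 is in scope (the keys and values here are made up):

// Minimal usage sketch; the contents are illustrative only.
use std::collections::{BTreeMap, HashMap};

fn sketch() {
    let gas: U256 = 79_000u64.into();  // previously written x!(79000)
    assert_eq!(gas, U256::from(79_000u64));
    let balances: BTreeMap<String, U256> = map_into![ "alice" => 69u64, "bob" => 0u64 ];
    let names: HashMap<u64, String> = hash_map_into![ 1u64 => "one", 2u64 => "two" ];
    assert_eq!(balances["alice"], 69u64.into());
    assert_eq!(names[&1u64], "one");
}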
#[macro_export]
View File
@ -718,7 +718,7 @@ mod tests {
#[test]
fn from_and_to_u256() {
let u: U256 = x!(0x123456789abcdef0u64);
let u: U256 = 0x123456789abcdef0u64.into();
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
View File
@ -48,7 +48,7 @@ impl FromJson for Bytes {
impl FromJson for BTreeMap<H256, H256> {
fn from_json(json: &Json) -> Self {
match *json {
Json::Object(ref o) => o.iter().map(|(key, value)| (x!(&u256_from_str(key)), x!(&U256::from_json(value)))).collect(),
Json::Object(ref o) => o.iter().map(|(key, value)| (u256_from_str(key).into(), U256::from_json(value).into())).collect(),
_ => BTreeMap::new(),
}
}
View File
@ -325,7 +325,7 @@ impl SecretStore {
ret
}
/// Returns secret for unlocked account.
/// Returns secret for locked account.
pub fn locked_account_secret(&self, account: &Address, pass: &str) -> Result<crypto::Secret, SigningError> {
let secret_id = try!(self.account(&account).ok_or(SigningError::NoAccount));
self.get(&secret_id, pass).or_else(|e| Err(match e {
@ -554,7 +554,7 @@ mod tests {
H256::from_str("517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2").unwrap(),
262144,
32));
key_file.account = Some(x!(i as u64));
key_file.account = Some((i as u64).into());
result.push(key_file.id.clone());
write_sstore.import_key(key_file).unwrap();
}
@ -627,7 +627,7 @@ mod tests {
sstore.sign(&address, &H256::random()).unwrap()
};
assert!(signature != x!(0));
assert!(signature != 0.into());
}
#[test]