Merge branch 'fixed_hash_util' into move_hash

debris 2016-08-03 16:35:55 +02:00
commit 573e775ef9
109 changed files with 1740 additions and 1325 deletions

Cargo.lock generated
View File

@ -15,6 +15,7 @@ dependencies = [
"ethcore-ipc-codegen 1.3.0",
"ethcore-ipc-hypervisor 1.2.0",
"ethcore-ipc-nano 1.3.0",
"ethcore-ipc-tests 0.1.0",
"ethcore-logger 1.3.0",
"ethcore-rpc 1.3.0",
"ethcore-signer 1.3.0",
@ -78,6 +79,19 @@ dependencies = [
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-set"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-vec"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "0.3.3"
@ -244,6 +258,7 @@ dependencies = [
name = "ethcore"
version = "1.3.0"
dependencies = [
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
@ -265,7 +280,6 @@ dependencies = [
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -332,7 +346,6 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -340,11 +353,25 @@ name = "ethcore-ipc-nano"
version = "1.3.0"
dependencies = [
"ethcore-ipc 1.3.0",
"jsonrpc-core 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
]
[[package]]
name = "ethcore-ipc-tests"
version = "0.1.0"
dependencies = [
"ethcore-devtools 1.3.0",
"ethcore-ipc 1.3.0",
"ethcore-ipc-codegen 1.3.0",
"ethcore-ipc-nano 1.3.0",
"ethcore-util 1.3.0",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ethcore-logger"
version = "1.3.0"
@ -492,7 +519,6 @@ dependencies = [
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -620,7 +646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "json-ipc-server"
version = "0.2.4"
source = "git+https://github.com/ethcore/json-ipc-server.git#93c2756f669c6a1872dec1ef755a0870f40c03c3"
source = "git+https://github.com/ethcore/json-ipc-server.git#7a02a0f8b249fda100b9bab5f90b2081d410d8cf"
dependencies = [
"bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -899,7 +925,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "parity-dapps"
version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
source = "git+https://github.com/ethcore/parity-ui.git#697e860dedc45003909602a002e7743478ab173a"
dependencies = [
"aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@ -913,7 +939,7 @@ dependencies = [
[[package]]
name = "parity-dapps-home"
version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
source = "git+https://github.com/ethcore/parity-ui.git#697e860dedc45003909602a002e7743478ab173a"
dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
]
@ -921,7 +947,7 @@ dependencies = [
[[package]]
name = "parity-dapps-signer"
version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
source = "git+https://github.com/ethcore/parity-ui.git#697e860dedc45003909602a002e7743478ab173a"
dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
]
@ -929,7 +955,7 @@ dependencies = [
[[package]]
name = "parity-dapps-status"
version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
source = "git+https://github.com/ethcore/parity-ui.git#697e860dedc45003909602a002e7743478ab173a"
dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
]
@ -937,7 +963,7 @@ dependencies = [
[[package]]
name = "parity-dapps-wallet"
version = "0.6.0"
source = "git+https://github.com/ethcore/parity-ui.git#7120546d08d4d9eb648e255c04935002223d362f"
source = "git+https://github.com/ethcore/parity-ui.git#697e860dedc45003909602a002e7743478ab173a"
dependencies = [
"parity-dapps 0.6.0 (git+https://github.com/ethcore/parity-ui.git)",
]

View File

@ -10,6 +10,7 @@ build = "build.rs"
rustc_version = "0.1"
syntex = "*"
ethcore-ipc-codegen = { path = "ipc/codegen" }
ethcore-ipc-tests = { path = "ipc/tests" }
[dependencies]
log = "0.3"
@ -56,8 +57,9 @@ default = ["ui", "use-precompiled-js"]
ui = ["dapps", "ethcore-signer/ui"]
use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
dapps = ["ethcore-dapps"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
ipc = ["ethcore/ipc"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
json-tests = ["ethcore/json-tests"]
[[bin]]
path = "parity/main.rs"

View File

@ -25,12 +25,6 @@ use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::{VecDeque, HashMap, BTreeMap};
impl From<String> for Error {
fn from(s: String) -> Error {
Error::RocksDb(s)
}
}
enum WriteCacheEntry {
Remove,
Write(Vec<u8>),

View File

@ -31,8 +31,8 @@ pub struct KeyValue {
pub value: Vec<u8>,
}
#[derive(Debug, Binary)]
pub enum Error {
#[derive(Debug, Binary)]
pub enum Error {
AlreadyOpen,
IsClosed,
RocksDb(String),
@ -41,6 +41,12 @@ pub struct KeyValue {
UncommitedTransactions,
}
impl From<String> for Error {
fn from(s: String) -> Error {
Error::RocksDb(s)
}
}
/// Database configuration
#[derive(Binary)]
pub struct DatabaseConfig {
@ -68,7 +74,7 @@ impl DatabaseConfig {
}
}
pub trait DatabaseService : Sized {
pub trait DatabaseService : Sized {
/// Opens database in the specified path
fn open(&self, config: DatabaseConfig, path: String) -> Result<(), Error>;

View File

@ -62,6 +62,7 @@ impl TestSocket {
impl Read for TestSocket {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let end_position = cmp::min(self.read_buffer.len(), self.cursor+buf.len());
if self.cursor > end_position { return Ok(0) }
let len = cmp::max(end_position - self.cursor, 0);
match len {
0 => Ok(0),
@ -69,7 +70,7 @@ impl Read for TestSocket {
for i in self.cursor..end_position {
buf[i-self.cursor] = self.read_buffer[i];
}
self.cursor = self.cursor + buf.len();
self.cursor = end_position;
Ok(len)
}
}
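The fix above makes the cursor advance by the number of bytes actually served rather than by the caller's buffer length. A self-contained sketch of the corrected reader (simplified: the real TestSocket carries more fields), with a short check of the behaviour:

use std::cmp;
use std::io::{Read, Result};

struct TestSocket { read_buffer: Vec<u8>, cursor: usize }

impl Read for TestSocket {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let end_position = cmp::min(self.read_buffer.len(), self.cursor + buf.len());
        if self.cursor > end_position { return Ok(0) }
        let len = end_position - self.cursor;
        buf[..len].copy_from_slice(&self.read_buffer[self.cursor..end_position]);
        self.cursor = end_position; // was: self.cursor + buf.len()
        Ok(len)
    }
}

fn main() {
    let mut s = TestSocket { read_buffer: b"hello".to_vec(), cursor: 0 };
    let mut buf = [0u8; 8];
    assert_eq!(s.read(&mut buf).unwrap(), 5); // only five bytes are available
    assert_eq!(s.read(&mut buf).unwrap(), 0); // cursor stopped at 5, not at 8
}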

View File

@ -8,7 +8,6 @@ authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"
[build-dependencies]
syntex = "*"
"ethcore-ipc-codegen" = { path = "../ipc/codegen" }
[dependencies]
@ -17,21 +16,22 @@ env_logger = "0.3"
rustc-serialize = "0.3"
heapsize = "0.3"
rust-crypto = "0.2.34"
time = "0.1"
ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" }
num_cpus = "0.2"
clippy = { version = "0.0.79", optional = true}
crossbeam = "0.2.9"
lazy_static = "0.2"
bloomchain = "0.1"
rayon = "0.3.1"
semver = "0.2"
bit-set = "0.4"
time = "0.1"
evmjit = { path = "../evmjit", optional = true }
clippy = { version = "0.0.79", optional = true}
ethash = { path = "../ethash" }
ethcore-util = { path = "../util" }
ethcore-devtools = { path = "../devtools" }
ethjson = { path = "../json" }
bloomchain = "0.1"
ethcore-ipc = { path = "../ipc/rpc" }
rayon = "0.3.1"
ethstore = { path = "../ethstore" }
semver = "0.2"
ethcore-ipc-nano = { path = "../ipc/nano" }
[dependencies.hyper]

View File

@ -14,48 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate syntex;
extern crate ethcore_ipc_codegen as codegen;
use std::env;
use std::path::Path;
extern crate ethcore_ipc_codegen;
fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
// serialization pass
{
let src = Path::new("src/types/mod.rs.in");
let dst = Path::new(&out_dir).join("types.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
// blockchain client interface
{
let src = Path::new("src/client/traits.rs");
let intermediate = Path::new(&out_dir).join("traits.intermediate.rs.in");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &intermediate).unwrap();
let dst = Path::new(&out_dir).join("traits.ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &intermediate, &dst).unwrap();
}
// chain notify interface
{
let src = Path::new("src/client/chain_notify.rs");
let intermediate = Path::new(&out_dir).join("chain_notify.intermediate.rs.in");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &intermediate).unwrap();
let dst = Path::new(&out_dir).join("chain_notify.ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &intermediate, &dst).unwrap();
}
ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap();
ethcore_ipc_codegen::derive_ipc("src/client/traits.rs").unwrap();
ethcore_ipc_codegen::derive_ipc("src/client/chain_notify.rs").unwrap();
}

View File

@ -0,0 +1,33 @@
{
"name": "TestInstantSeal",
"engine": {
"InstantSeal": null
},
"params": {
"accountStartNonce": "0x0100000",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x2"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x00006d6f7264656e",
"mixHash": "0x00000000000000000000000000000000000000647572616c65787365646c6578"
}
},
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x",
"gasLimit": "0x2fefd8"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" }
}
}

View File

@ -107,18 +107,23 @@ impl_bridge_type!(Message, 32, H256, SSMessage);
impl_bridge_type!(Address, 20, H160, SSAddress);
struct NullDir;
#[derive(Default)]
struct NullDir {
accounts: RwLock<HashMap<SSAddress, SafeAccount>>,
}
impl KeyDirectory for NullDir {
fn load(&self) -> Result<Vec<SafeAccount>, SSError> {
Ok(vec![])
Ok(self.accounts.read().values().cloned().collect())
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, SSError> {
self.accounts.write().insert(account.address.clone(), account.clone());
Ok(account)
}
fn remove(&self, _address: &SSAddress) -> Result<(), SSError> {
fn remove(&self, address: &SSAddress) -> Result<(), SSError> {
self.accounts.write().remove(address);
Ok(())
}
}
@ -164,7 +169,7 @@ impl AccountProvider {
pub fn transient_provider() -> Self {
AccountProvider {
unlocked: RwLock::new(HashMap::new()),
sstore: Box::new(EthStore::open(Box::new(NullDir)).unwrap())
sstore: Box::new(EthStore::open(Box::new(NullDir::default())).unwrap())
}
}
@ -184,13 +189,14 @@ impl AccountProvider {
}
/// Returns addresses of all accounts.
pub fn accounts(&self) -> Vec<H160> {
self.sstore.accounts().into_iter().map(|a| H160(a.into())).collect()
pub fn accounts(&self) -> Result<Vec<H160>, Error> {
let accounts = try!(self.sstore.accounts()).into_iter().map(|a| H160(a.into())).collect();
Ok(accounts)
}
/// Returns each account along with name and meta.
pub fn accounts_info(&self) -> Result<HashMap<H160, AccountMeta>, Error> {
let r: HashMap<H160, AccountMeta> = self.sstore.accounts()
let r: HashMap<H160, AccountMeta> = try!(self.sstore.accounts())
.into_iter()
.map(|a| (H160(a.clone().into()), self.account_meta(a).unwrap_or_else(|_| Default::default())))
.collect();

View File

@ -16,7 +16,6 @@
//! Blockchain database.
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};
use bloomchain as bc;
use util::*;
use header::*;
@ -32,6 +31,7 @@ use blockchain::update::ExtrasUpdate;
use blockchain::{CacheSize, ImportRoute, Config};
use db::{Writable, Readable, CacheUpdatePolicy};
use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_COL_BODIES};
use cache_manager::CacheManager;
const LOG_BLOOMS_LEVELS: usize = 3;
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
@ -130,11 +130,6 @@ enum CacheID {
BlockReceipts(H256),
}
struct CacheManager {
cache_usage: VecDeque<HashSet<CacheID>>,
in_use: HashSet<CacheID>,
}
impl bc::group::BloomGroupDatabase for BlockChain {
fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
let position = LogGroupPosition::from(position.clone());
@ -148,8 +143,6 @@ impl bc::group::BloomGroupDatabase for BlockChain {
/// **Does not do input data verification.**
pub struct BlockChain {
// All locks must be captured in the order declared here.
pref_cache_size: AtomicUsize,
max_cache_size: AtomicUsize,
blooms_config: bc::Config,
best_block: RwLock<BestBlock>,
@ -167,7 +160,11 @@ pub struct BlockChain {
db: Arc<Database>,
cache_man: RwLock<CacheManager>,
cache_man: RwLock<CacheManager<CacheID>>,
pending_best_block: RwLock<Option<BestBlock>>,
pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
pending_transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
}
impl BlockProvider for BlockChain {
@ -297,8 +294,6 @@ impl BlockProvider for BlockChain {
}
}
const COLLECTION_QUEUE_SIZE: usize = 8;
pub struct AncestryIter<'a> {
current: H256,
chain: &'a BlockChain,
@ -320,12 +315,10 @@ impl<'a> Iterator for AncestryIter<'a> {
impl BlockChain {
/// Create new instance of blockchain from given Genesis
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
// 400 is the average size of the key
let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);
let bc = BlockChain {
pref_cache_size: AtomicUsize::new(config.pref_cache_size),
max_cache_size: AtomicUsize::new(config.max_cache_size),
blooms_config: bc::Config {
levels: LOG_BLOOMS_LEVELS,
elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
@ -340,6 +333,9 @@ impl BlockChain {
block_receipts: RwLock::new(HashMap::new()),
db: db.clone(),
cache_man: RwLock::new(cache_man),
pending_best_block: RwLock::new(None),
pending_block_hashes: RwLock::new(HashMap::new()),
pending_transaction_addresses: RwLock::new(HashMap::new()),
};
// load best block
@ -449,12 +445,6 @@ impl BlockChain {
None
}
/// Set the cache configuration.
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed);
self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed);
}
/// Returns a tree route between `from` and `to`, which is a tuple of:
///
/// - a vector of hashes of all blocks, ordered from `from` to `to`.
@ -557,6 +547,8 @@ impl BlockChain {
return ImportRoute::none();
}
assert!(self.pending_best_block.read().is_none());
let block_rlp = UntrustedRlp::new(bytes);
let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
@ -576,7 +568,7 @@ impl BlockChain {
);
}
self.apply_update(batch, ExtrasUpdate {
self.prepare_update(batch, ExtrasUpdate {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info),
@ -631,8 +623,8 @@ impl BlockChain {
}
}
/// Applies extras update.
fn apply_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
/// Prepares extras update.
fn prepare_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
{
for hash in update.block_details.keys().cloned() {
self.note_used(CacheID::BlockDetails(hash));
@ -655,29 +647,46 @@ impl BlockChain {
// These cached values must be updated last with all three locks taken to avoid
// cache decoherence
{
let mut best_block = self.best_block.write();
let mut best_block = self.pending_best_block.write();
// update best block
match update.info.location {
BlockLocation::Branch => (),
_ => {
batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
*best_block = BestBlock {
*best_block = Some(BestBlock {
hash: update.info.hash,
number: update.info.number,
total_difficulty: update.info.total_difficulty,
block: update.block.to_vec(),
};
});
}
}
let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write();
let mut write_hashes = self.pending_block_hashes.write();
let mut write_txs = self.pending_transaction_addresses.write();
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
}
}
/// Apply pending insertion updates
pub fn commit(&self) {
let mut best_block = self.best_block.write();
let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write();
let mut pending_best_block = self.pending_best_block.write();
let mut pending_write_hashes = self.pending_block_hashes.write();
let mut pending_write_txs = self.pending_transaction_addresses.write();
// update best block
if let Some(block) = pending_best_block.take() {
*best_block = block;
}
write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new()));
}
/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
pub fn ancestry_iter(&self, first: H256) -> Option<AncestryIter> {
if self.is_known(&first) {
@ -874,74 +883,40 @@ impl BlockChain {
/// Let the cache system know that a cacheable item has been used.
fn note_used(&self, id: CacheID) {
let mut cache_man = self.cache_man.write();
if !cache_man.cache_usage[0].contains(&id) {
cache_man.cache_usage[0].insert(id.clone());
if cache_man.in_use.contains(&id) {
if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e|e.contains(&id)) {
c.remove(&id);
}
} else {
cache_man.in_use.insert(id);
}
}
cache_man.note_used(id);
}
/// Ticks our cache system and throws out any old data.
pub fn collect_garbage(&self) {
if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) {
// rotate cache
let mut cache_man = self.cache_man.write();
const AVERAGE_BYTES_PER_CACHE_ENTRY: usize = 400; //estimated
if cache_man.cache_usage[0].len() > self.pref_cache_size.load(AtomicOrder::Relaxed) / COLLECTION_QUEUE_SIZE / AVERAGE_BYTES_PER_CACHE_ENTRY {
trace!("Cache rotation, cache_size = {}", self.cache_size().total());
let cache = cache_man.cache_usage.pop_back().unwrap();
cache_man.cache_usage.push_front(cache);
}
return;
}
let mut cache_man = self.cache_man.write();
cache_man.collect_garbage(|| self.cache_size().total(), | ids | {
let mut block_headers = self.block_headers.write();
let mut block_bodies = self.block_bodies.write();
let mut block_details = self.block_details.write();
let mut block_hashes = self.block_hashes.write();
let mut transaction_addresses = self.transaction_addresses.write();
let mut blocks_blooms = self.blocks_blooms.write();
let mut block_receipts = self.block_receipts.write();
for i in 0..COLLECTION_QUEUE_SIZE {
{
trace!("Cache cleanup round started {}, cache_size = {}", i, self.cache_size().total());
let mut block_headers = self.block_headers.write();
let mut block_bodies = self.block_bodies.write();
let mut block_details = self.block_details.write();
let mut block_hashes = self.block_hashes.write();
let mut transaction_addresses = self.transaction_addresses.write();
let mut blocks_blooms = self.blocks_blooms.write();
let mut block_receipts = self.block_receipts.write();
let mut cache_man = self.cache_man.write();
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
cache_man.in_use.remove(&id);
match id {
CacheID::BlockHeader(h) => { block_headers.remove(&h); },
CacheID::BlockBody(h) => { block_bodies.remove(&h); },
CacheID::BlockDetails(h) => { block_details.remove(&h); }
CacheID::BlockHashes(h) => { block_hashes.remove(&h); }
CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); }
CacheID::BlocksBlooms(h) => { blocks_blooms.remove(&h); }
CacheID::BlockReceipts(h) => { block_receipts.remove(&h); }
}
for id in &ids {
match *id {
CacheID::BlockHeader(ref h) => { block_headers.remove(h); },
CacheID::BlockBody(ref h) => { block_bodies.remove(h); },
CacheID::BlockDetails(ref h) => { block_details.remove(h); }
CacheID::BlockHashes(ref h) => { block_hashes.remove(h); }
CacheID::TransactionAddresses(ref h) => { transaction_addresses.remove(h); }
CacheID::BlocksBlooms(ref h) => { blocks_blooms.remove(h); }
CacheID::BlockReceipts(ref h) => { block_receipts.remove(h); }
}
cache_man.cache_usage.push_front(HashSet::new());
// TODO: handle block_hashes properly.
block_hashes.clear();
block_headers.shrink_to_fit();
block_bodies.shrink_to_fit();
block_details.shrink_to_fit();
block_hashes.shrink_to_fit();
transaction_addresses.shrink_to_fit();
blocks_blooms.shrink_to_fit();
block_receipts.shrink_to_fit();
}
trace!("Cache cleanup round complete {}, cache_size = {}", i, self.cache_size().total());
if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
}
// TODO: m_lastCollection = chrono::system_clock::now();
block_headers.shrink_to_fit();
block_bodies.shrink_to_fit();
block_details.shrink_to_fit();
block_hashes.shrink_to_fit();
transaction_addresses.shrink_to_fit();
blocks_blooms.shrink_to_fit();
block_receipts.shrink_to_fit();
});
}
/// Create a block body from a block.
@ -991,6 +966,8 @@ mod tests {
// when
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
assert_eq!(bc.best_block_number(), 0);
bc.commit();
// NOTE no db.write here (we want to check if best block is cached)
// then
@ -1020,6 +997,7 @@ mod tests {
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
bc.commit();
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.best_block_number(), 1);
@ -1047,6 +1025,7 @@ mod tests {
let block = canon_chain.generate(&mut finalizer).unwrap();
block_hashes.push(BlockView::new(&block).header_view().sha3());
bc.insert_block(&batch, &block, vec![]);
bc.commit();
}
db.write(batch).unwrap();
@ -1077,7 +1056,10 @@ mod tests {
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let batch = db.transaction();
bc.insert_block(&batch, &b1a, vec![]);
for b in [&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b].iter() {
bc.insert_block(&batch, b, vec![]);
bc.commit();
}
bc.insert_block(&batch, &b1b, vec![]);
bc.insert_block(&batch, &b2a, vec![]);
bc.insert_block(&batch, &b2b, vec![]);
@ -1123,11 +1105,16 @@ mod tests {
let batch = db.transaction();
let ir1 = bc.insert_block(&batch, &b1, vec![]);
bc.commit();
let ir2 = bc.insert_block(&batch, &b2, vec![]);
bc.commit();
let ir3b = bc.insert_block(&batch, &b3b, vec![]);
bc.commit();
db.write(batch).unwrap();
assert_eq!(bc.block_hash(3).unwrap(), b3b_hash);
let batch = db.transaction();
let ir3a = bc.insert_block(&batch, &b3a, vec![]);
bc.commit();
db.write(batch).unwrap();
assert_eq!(ir1, ImportRoute {
@ -1235,6 +1222,7 @@ mod tests {
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
bc.commit();
assert_eq!(bc.best_block_hash(), first_hash);
}
@ -1299,6 +1287,7 @@ mod tests {
let batch = db.transaction();
bc.insert_block(&batch, &b1, vec![]);
db.write(batch).unwrap();
bc.commit();
let transactions = bc.transactions(&b1_hash).unwrap();
assert_eq!(transactions.len(), 7);
@ -1311,6 +1300,7 @@ mod tests {
let batch = db.transaction();
let res = bc.insert_block(&batch, bytes, receipts);
db.write(batch).unwrap();
bc.commit();
res
}
@ -1401,11 +1391,13 @@ mod tests {
for _ in 0..5 {
let canon_block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&batch, &canon_block, vec![]);
bc.commit();
}
assert_eq!(bc.best_block_number(), 5);
bc.insert_block(&batch, &uncle, vec![]);
db.write(batch).unwrap();
bc.commit();
}
// re-loading the blockchain should load the correct best block.
@ -1431,7 +1423,9 @@ mod tests {
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
bc.commit();
bc.insert_block(&batch, &second, vec![]);
bc.commit();
db.write(batch).unwrap();
assert_eq!(bc.rewind(), Some(first_hash.clone()));
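The tests above all follow the write path introduced by this change. A condensed sketch of the order of operations (assuming a `db`, a `bc: BlockChain`, and some block `bytes` are in scope, as in those tests):

let batch = db.transaction();
bc.insert_block(&batch, &bytes, vec![]); // stages extras in the pending_* caches and in the batch
db.write(batch).unwrap();                // persists the batch to the database
bc.commit();                             // promotes the pending caches into the live ones

Until `commit()` runs, readers keep seeing the previous best block and caches, which is what the `assert_eq!(bc.best_block_number(), 0)` before `bc.commit()` checks above.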

View File

@ -0,0 +1,69 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use std::hash::Hash;
const COLLECTION_QUEUE_SIZE: usize = 8;
pub struct CacheManager<T> where T: Eq + Hash {
pref_cache_size: usize,
max_cache_size: usize,
bytes_per_cache_entry: usize,
cache_usage: VecDeque<HashSet<T>>
}
impl<T> CacheManager<T> where T: Eq + Hash {
pub fn new(pref_cache_size: usize, max_cache_size: usize, bytes_per_cache_entry: usize) -> Self {
CacheManager {
pref_cache_size: pref_cache_size,
max_cache_size: max_cache_size,
bytes_per_cache_entry: bytes_per_cache_entry,
cache_usage: (0..COLLECTION_QUEUE_SIZE).into_iter().map(|_| Default::default()).collect(),
}
}
pub fn note_used(&mut self, id: T) {
if !self.cache_usage[0].contains(&id) {
if let Some(c) = self.cache_usage.iter_mut().skip(1).find(|e| e.contains(&id)) {
c.remove(&id);
}
self.cache_usage[0].insert(id);
}
}
pub fn collect_garbage<C, F>(&mut self, current_size: C, mut notify_unused: F) where C: Fn() -> usize, F: FnMut(HashSet<T>) {
if current_size() < self.pref_cache_size {
self.rotate_cache_if_needed();
return;
}
for _ in 0..COLLECTION_QUEUE_SIZE {
notify_unused(self.cache_usage.pop_back().unwrap());
self.cache_usage.push_front(Default::default());
if current_size() < self.max_cache_size {
break;
}
}
}
fn rotate_cache_if_needed(&mut self) {
if self.cache_usage[0].len() * self.bytes_per_cache_entry > self.pref_cache_size / COLLECTION_QUEUE_SIZE {
let cache = self.cache_usage.pop_back().unwrap();
self.cache_usage.push_front(cache);
}
}
}
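A short usage sketch of the manager defined above (hypothetical `u64` ids and sizes, not the BlockChain integration):

// Assumes the CacheManager above is in scope.
fn main() {
    // 8 MB preferred, 16 MB hard limit, ~400 bytes per tracked entry.
    let mut cache_man: CacheManager<u64> = CacheManager::new(8 * 1024 * 1024, 16 * 1024 * 1024, 400);

    // Touching an entry records it in the most recent usage bucket.
    cache_man.note_used(42);

    // Periodically evict the oldest buckets until the reported size is acceptable.
    let current_size = || 32 * 1024 * 1024usize; // pretend the cache is oversized
    cache_man.collect_garbage(current_size, |ids| {
        for id in ids {
            // drop the cached item keyed by `id` here
            let _ = id;
        }
    });
}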

View File

@ -178,15 +178,15 @@ impl Client {
db_config.compaction = config.db_compaction.compaction_profile();
db_config.wal = config.db_wal;
let db = Arc::new(Database::open(&db_config, &path.to_str().unwrap()).expect("Error opening database"));
let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database)));
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone()));
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone())));
let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
let batch = DBTransaction::new(&db);
state_db.commit(&batch, 0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
db.write(batch).expect("Error writing genesis state to state DB");
try!(state_db.commit(&batch, 0, &spec.genesis_header().hash(), None));
try!(db.write(batch).map_err(ClientError::Database));
}
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
@ -452,6 +452,7 @@ impl Client {
});
// Final commit to the DB
self.db.write(batch).expect("State DB write failed.");
self.chain.commit();
self.update_last_hashes(&parent, hash);
route
@ -549,6 +550,7 @@ impl Client {
pub fn tick(&self) {
self.chain.collect_garbage();
self.block_queue.collect_garbage();
self.tracedb.collect_garbage();
match self.mode {
Mode::Dark(timeout) => {
@ -582,11 +584,6 @@ impl Client {
}
}
/// Set up the cache behaviour.
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
self.chain.configure_cache(pref_cache_size, max_cache_size);
}
/// Look up the block number for the given block ID.
pub fn block_number(&self, id: BlockID) -> Option<BlockNumber> {
match id {

View File

@ -1,4 +1,5 @@
use trace::Error as TraceError;
use util::UtilError;
use std::fmt::{Display, Formatter, Error as FmtError};
/// Client configuration errors.
@ -6,6 +7,10 @@ use std::fmt::{Display, Formatter, Error as FmtError};
pub enum Error {
/// TraceDB configuration error.
Trace(TraceError),
/// Database error
Database(String),
/// Util error
Util(UtilError),
}
impl From<TraceError> for Error {
@ -14,10 +19,18 @@ impl From<TraceError> for Error {
}
}
impl From<UtilError> for Error {
fn from(err: UtilError) -> Self {
Error::Util(err)
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
Error::Trace(ref err) => write!(f, "{}", err)
Error::Trace(ref err) => write!(f, "{}", err),
Error::Util(ref err) => write!(f, "{}", err),
Error::Database(ref s) => write!(f, "Database error: {}", s),
}
}
}

View File

@ -40,13 +40,13 @@ pub use self::traits::{BlockChainClient, MiningBlockChainClient, RemoteClient};
mod traits {
#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/traits.ipc.rs"));
include!(concat!(env!("OUT_DIR"), "/traits.rs"));
}
pub mod chain_notify {
//! Chain notify interface
#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/chain_notify.ipc.rs"));
include!(concat!(env!("OUT_DIR"), "/chain_notify.rs"));
}

View File

@ -257,7 +257,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
}
impl MiningBlockChainClient for TestBlockChainClient {
fn prepare_open_block(&self, _author: Address, _gas_range_target: (U256, U256), _extra_data: Bytes) -> OpenBlock {
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
let engine = &self.spec.engine;
let genesis_header = self.spec.genesis_header();
let mut db_result = get_temp_journal_db();
@ -265,7 +265,7 @@ impl MiningBlockChainClient for TestBlockChainClient {
self.spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
OpenBlock::new(
let mut open_block = OpenBlock::new(
engine.deref(),
self.vm_factory(),
Default::default(),
@ -273,10 +273,13 @@ impl MiningBlockChainClient for TestBlockChainClient {
db,
&genesis_header,
last_hashes,
Address::zero(),
(3141562.into(), 31415620.into()),
vec![]
).expect("Opening block for tests will not fail.")
author,
gas_range_target,
extra_data
).expect("Opening block for tests will not fail.");
// TODO [todr] Override timestamp for predictability (set_timestamp_now kind of sucks)
open_block.set_timestamp(10_000_000);
open_block
}
fn vm_factory(&self) -> &EvmFactory {

View File

@ -0,0 +1,108 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeMap;
use util::hash::Address;
use builtin::Builtin;
use engines::Engine;
use spec::CommonParams;
use evm::Schedule;
use env_info::EnvInfo;
use block::ExecutedBlock;
use common::Bytes;
use account_provider::AccountProvider;
/// An engine which does not provide any consensus mechanism, just seals blocks internally.
pub struct InstantSeal {
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
}
impl InstantSeal {
/// Returns new instance of InstantSeal with default VM Factory
pub fn new(params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Self {
InstantSeal {
params: params,
builtins: builtins,
}
}
}
impl Engine for InstantSeal {
fn name(&self) -> &str {
"InstantSeal"
}
fn params(&self) -> &CommonParams {
&self.params
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
&self.builtins
}
fn schedule(&self, _env_info: &EnvInfo) -> Schedule {
Schedule::new_homestead()
}
fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
Some(Vec::new())
}
}
#[cfg(test)]
mod tests {
use common::*;
use tests::helpers::*;
use account_provider::AccountProvider;
use spec::Spec;
use block::*;
/// Create a new test chain spec with `InstantSeal` consensus engine.
fn new_test_instant() -> Spec { Spec::load(include_bytes!("../../res/instant_seal.json")) }
#[test]
fn instant_can_seal() {
let tap = AccountProvider::transient_provider();
let addr = tap.insert_account("".sha3(), "").unwrap();
let spec = new_test_instant();
let engine = &spec.engine;
let genesis_header = spec.genesis_header();
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut());
let last_hashes = vec![genesis_header.hash()];
let vm_factory = Default::default();
let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();
// Seal with empty AccountProvider.
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
assert!(b.try_seal(engine.deref(), seal).is_ok());
}
#[test]
fn instant_cant_verify() {
let engine = new_test_instant().engine;
let mut header: Header = Header::default();
assert!(engine.verify_block_basic(&header, None).is_ok());
header.set_seal(vec![rlp::encode(&Signature::zero()).to_vec()]);
assert!(engine.verify_block_unordered(&header, None).is_ok());
}
}

View File

@ -17,9 +17,11 @@
//! Consensus engine specification and basic implementations.
mod null_engine;
mod instant_seal;
mod basic_authority;
pub use self::null_engine::NullEngine;
pub use self::instant_seal::InstantSeal;
pub use self::basic_authority::BasicAuthority;
use common::*;

View File

@ -107,9 +107,9 @@ pub trait CostType: ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Out
fn overflow_add(self, other: Self) -> (Self, bool);
/// Multiple with overflow
fn overflow_mul(self, other: Self) -> (Self, bool);
/// Single-step full multiplication and division: `self*other/div`
/// Single-step full multiplication and shift: `(self*other) >> shr`
/// Should not overflow on intermediate steps
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool);
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool);
}
impl CostType for U256 {
@ -133,14 +133,14 @@ impl CostType for U256 {
Uint::overflowing_mul(self, other)
}
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool) {
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let x = self.full_mul(other);
let (U512(parts), o) = Uint::overflowing_div(x, U512::from(div));
let U512(parts) = x;
let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0;
let U512(parts) = x >> shr;
(
U256([parts[0], parts[1], parts[2], parts[3]]),
o | overflow
overflow
)
}
}
@ -169,11 +169,13 @@ impl CostType for usize {
self.overflowing_mul(other)
}
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool) {
fn overflow_mul_shr(self, other: Self, shr: usize) -> (Self, bool) {
let (c, o) = U128::from(self).overflowing_mul(U128::from(other));
let (U128(parts), o1) = c.overflowing_div(U128::from(div));
let U128(parts) = c;
let overflow = o | (parts[1] > 0);
let U128(parts) = c >> shr;
let result = parts[0] as usize;
let overflow = o | o1 | (parts[1] > 0) | (parts[0] > result as u64);
let overflow = overflow | (parts[0] > result as u64);
(result, overflow)
}
}
@ -189,13 +191,13 @@ pub trait Evm {
#[test]
fn should_calculate_overflow_mul_div_without_overflow() {
fn should_calculate_overflow_mul_shr_without_overflow() {
// given
let num = 10_000_000;
let num = 1048576;
// when
let (res1, o1) = U256::from(num).overflow_mul_div(U256::from(num), U256::from(num));
let (res2, o2) = num.overflow_mul_div(num, num);
let (res1, o1) = U256::from(num).overflow_mul_shr(U256::from(num), 20);
let (res2, o2) = num.overflow_mul_shr(num, 20);
// then
assert_eq!(res1, U256::from(num));
@ -205,22 +207,21 @@ fn should_calculate_overflow_mul_div_without_overflow() {
}
#[test]
fn should_calculate_overflow_mul_div_with_overflow() {
fn should_calculate_overflow_mul_shr_with_overflow() {
// given
let max = ::std::u64::MAX;
let num1 = U256([max, max, max, max]);
let num2 = ::std::usize::MAX;
// when
let (res1, o1) = num1.overflow_mul_div(num1, num1 - U256::from(2));
let (res2, o2) = num2.overflow_mul_div(num2, num2 - 2);
let (res1, o1) = num1.overflow_mul_shr(num1, 256);
let (res2, o2) = num2.overflow_mul_shr(num2, 64);
// then
// (x+2)^2/x = (x^2 + 4x + 4)/x = x + 4 + 4/x ~ (MAX-2) + 4 + 0 = 1
assert_eq!(res2, 1);
assert_eq!(res2, num2 - 1);
assert!(o2);
assert_eq!(res1, U256::from(1));
assert_eq!(res1, !U256::zero() - U256::one());
assert!(o1);
}
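The rename from `overflow_mul_div` to `overflow_mul_shr` relies on the fact that dividing by a power of two is the same as shifting right, so the full-width division can be dropped. A plain-`u128` illustration of the identity (not the `U256`/`usize` `CostType` impls above):

fn main() {
    let (a, b) = (1_048_576u128, 1_048_576u128); // 2^20 each, as in the updated test
    assert_eq!(a * b / 512, (a * b) >> 9);       // quad_coeff_div = 512 = 2^9
    assert_eq!((a * b) >> 20, 1_048_576);        // (2^20 * 2^20) >> 20 == 2^20
}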

View File

@ -18,8 +18,8 @@
use util::common::*;
use evm::{self, Schedule};
use types::executed::CallType;
use env_info::*;
use types::executed::CallType;
/// Result of externalities create function.
pub enum ContractCreateResult {

View File

@ -37,6 +37,7 @@ enum InstructionCost<Cost: CostType> {
pub struct Gasometer<Gas: CostType> {
pub current_gas: Gas,
pub current_mem_gas: Gas,
}
impl<Gas: CostType> Gasometer<Gas> {
@ -44,6 +45,7 @@ impl<Gas: CostType> Gasometer<Gas> {
pub fn new(current_gas: Gas) -> Self {
Gasometer {
current_gas: current_gas,
current_mem_gas: Gas::from(0),
}
}
@ -62,7 +64,7 @@ impl<Gas: CostType> Gasometer<Gas> {
info: &InstructionInfo,
stack: &Stack<U256>,
current_mem_size: usize,
) -> evm::Result<(Gas, usize)> {
) -> evm::Result<(Gas, Gas, usize)> {
let schedule = ext.schedule();
let tier = instructions::get_tier_idx(info.tier);
let default_gas = Gas::from(schedule.tier_step_gas[tier]);
@ -76,11 +78,11 @@ impl<Gas: CostType> Gasometer<Gas> {
let newval = stack.peek(1);
let val = U256::from(ext.storage_at(&address).as_slice());
let gas = if U256::zero() == val && &U256::zero() != newval {
let gas = if val.is_zero() && !newval.is_zero() {
schedule.sstore_set_gas
} else {
// Refund for below case is added when actually executing sstore
// !self.is_zero(&val) && self.is_zero(newval)
// !is_zero(&val) && is_zero(newval)
schedule.sstore_reset_gas
};
InstructionCost::Gas(Gas::from(gas))
@ -89,25 +91,25 @@ impl<Gas: CostType> Gasometer<Gas> {
InstructionCost::Gas(Gas::from(schedule.sload_gas))
},
instructions::MSTORE | instructions::MLOAD => {
InstructionCost::GasMem(default_gas, try!(self.mem_needed_const(stack.peek(0), 32)))
InstructionCost::GasMem(default_gas, try!(mem_needed_const(stack.peek(0), 32)))
},
instructions::MSTORE8 => {
InstructionCost::GasMem(default_gas, try!(self.mem_needed_const(stack.peek(0), 1)))
InstructionCost::GasMem(default_gas, try!(mem_needed_const(stack.peek(0), 1)))
},
instructions::RETURN => {
InstructionCost::GasMem(default_gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
InstructionCost::GasMem(default_gas, try!(mem_needed(stack.peek(0), stack.peek(1))))
},
instructions::SHA3 => {
let w = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(1))), 31));
let words = w >> 5;
let gas = Gas::from(schedule.sha3_gas) + (Gas::from(schedule.sha3_word_gas) * words);
InstructionCost::GasMem(gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
InstructionCost::GasMem(gas, try!(mem_needed(stack.peek(0), stack.peek(1))))
},
instructions::CALLDATACOPY | instructions::CODECOPY => {
InstructionCost::GasMemCopy(default_gas, try!(self.mem_needed(stack.peek(0), stack.peek(2))), try!(Gas::from_u256(*stack.peek(2))))
InstructionCost::GasMemCopy(default_gas, try!(mem_needed(stack.peek(0), stack.peek(2))), try!(Gas::from_u256(*stack.peek(2))))
},
instructions::EXTCODECOPY => {
InstructionCost::GasMemCopy(default_gas, try!(self.mem_needed(stack.peek(1), stack.peek(3))), try!(Gas::from_u256(*stack.peek(3))))
InstructionCost::GasMemCopy(default_gas, try!(mem_needed(stack.peek(1), stack.peek(3))), try!(Gas::from_u256(*stack.peek(3))))
},
instructions::LOG0...instructions::LOG4 => {
let no_of_topics = instructions::get_log_topics(instruction);
@ -115,13 +117,13 @@ impl<Gas: CostType> Gasometer<Gas> {
let data_gas = overflowing!(try!(Gas::from_u256(*stack.peek(1))).overflow_mul(Gas::from(schedule.log_data_gas)));
let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas)));
InstructionCost::GasMem(gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
InstructionCost::GasMem(gas, try!(mem_needed(stack.peek(0), stack.peek(1))))
},
instructions::CALL | instructions::CALLCODE => {
let mut gas = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(0))), schedule.call_gas));
let mem = cmp::max(
try!(self.mem_needed(stack.peek(5), stack.peek(6))),
try!(self.mem_needed(stack.peek(3), stack.peek(4)))
try!(mem_needed(stack.peek(5), stack.peek(6))),
try!(mem_needed(stack.peek(3), stack.peek(4)))
);
let address = u256_to_address(stack.peek(1));
@ -130,7 +132,7 @@ impl<Gas: CostType> Gasometer<Gas> {
gas = overflowing!(gas.overflow_add(Gas::from(schedule.call_new_account_gas)));
};
if stack.peek(2) > &U256::zero() {
if !stack.peek(2).is_zero() {
gas = overflowing!(gas.overflow_add(Gas::from(schedule.call_value_transfer_gas)));
};
@ -139,14 +141,14 @@ impl<Gas: CostType> Gasometer<Gas> {
instructions::DELEGATECALL => {
let gas = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(0))), schedule.call_gas));
let mem = cmp::max(
try!(self.mem_needed(stack.peek(4), stack.peek(5))),
try!(self.mem_needed(stack.peek(2), stack.peek(3)))
try!(mem_needed(stack.peek(4), stack.peek(5))),
try!(mem_needed(stack.peek(2), stack.peek(3)))
);
InstructionCost::GasMem(gas, mem)
},
instructions::CREATE => {
let gas = Gas::from(schedule.create_gas);
let mem = try!(self.mem_needed(stack.peek(1), stack.peek(2)));
let mem = try!(mem_needed(stack.peek(1), stack.peek(2)));
InstructionCost::GasMem(gas, mem)
},
instructions::EXP => {
@ -160,66 +162,65 @@ impl<Gas: CostType> Gasometer<Gas> {
match cost {
InstructionCost::Gas(gas) => {
Ok((gas, 0))
Ok((gas, self.current_mem_gas, 0))
},
InstructionCost::GasMem(gas, mem_size) => {
let (mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
let gas = overflowing!(gas.overflow_add(mem_gas));
Ok((gas, new_mem_size))
let (mem_gas_cost, new_mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
let gas = overflowing!(gas.overflow_add(mem_gas_cost));
Ok((gas, new_mem_gas, new_mem_size))
},
InstructionCost::GasMemCopy(gas, mem_size, copy) => {
let (mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
let copy = overflowing!(add_gas_usize(copy, 31));
let copy_gas = Gas::from(schedule.copy_gas) * (copy / Gas::from(32 as usize));
let (mem_gas_cost, new_mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
let copy = overflowing!(add_gas_usize(copy, 31)) >> 5;
let copy_gas = Gas::from(schedule.copy_gas) * copy;
let gas = overflowing!(gas.overflow_add(copy_gas));
let gas = overflowing!(gas.overflow_add(mem_gas));
Ok((gas, new_mem_size))
let gas = overflowing!(gas.overflow_add(mem_gas_cost));
Ok((gas, new_mem_gas, new_mem_size))
}
}
}
fn is_zero(&self, val: &Gas) -> bool {
&Gas::from(0) == val
}
fn mem_needed_const(&self, mem: &U256, add: usize) -> evm::Result<Gas> {
Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add))))
}
fn mem_needed(&self, offset: &U256, size: &U256) -> evm::Result<Gas> {
if self.is_zero(&try!(Gas::from_u256(*size))) {
return Ok(Gas::from(0));
}
Gas::from_u256(overflowing!(offset.overflowing_add(*size)))
}
fn mem_gas_cost(&self, schedule: &evm::Schedule, current_mem_size: usize, mem_size: &Gas) -> evm::Result<(Gas, usize)> {
fn mem_gas_cost(&self, schedule: &evm::Schedule, current_mem_size: usize, mem_size: &Gas) -> evm::Result<(Gas, Gas, usize)> {
let gas_for_mem = |mem_size: Gas| {
let s = mem_size >> 5;
// s * memory_gas + s * s / quad_coeff_div
let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas)));
// Calculate s*s/quad_coeff_div
let b = overflowing!(s.overflow_mul_div(s, Gas::from(schedule.quad_coeff_div)));
debug_assert_eq!(schedule.quad_coeff_div, 512);
let b = overflowing!(s.overflow_mul_shr(s, 9));
Ok(overflowing!(a.overflow_add(b)))
};
let current_mem_size = Gas::from(current_mem_size);
let req_mem_size_rounded = (overflowing!(mem_size.overflow_add(Gas::from(31 as usize))) >> 5) << 5;
let mem_gas_cost = if req_mem_size_rounded > current_mem_size {
let (mem_gas_cost, new_mem_gas) = if req_mem_size_rounded > current_mem_size {
let new_mem_gas = try!(gas_for_mem(req_mem_size_rounded));
let current_mem_gas = try!(gas_for_mem(current_mem_size));
new_mem_gas - current_mem_gas
(new_mem_gas - self.current_mem_gas, new_mem_gas)
} else {
Gas::from(0)
(Gas::from(0), self.current_mem_gas)
};
Ok((mem_gas_cost, req_mem_size_rounded.as_usize()))
Ok((mem_gas_cost, new_mem_gas, req_mem_size_rounded.as_usize()))
}
}
#[inline]
fn mem_needed_const<Gas: CostType>(mem: &U256, add: usize) -> evm::Result<Gas> {
Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add))))
}
#[inline]
fn mem_needed<Gas: CostType>(offset: &U256, size: &U256) -> evm::Result<Gas> {
if size.is_zero() {
return Ok(Gas::from(0));
}
Gas::from_u256(overflowing!(offset.overflowing_add(*size)))
}
#[inline]
fn add_gas_usize<Gas: CostType>(value: Gas, num: usize) -> (Gas, bool) {
value.overflow_add(Gas::from(num))
@ -251,9 +252,10 @@ fn test_calculate_mem_cost() {
let mem_size = 5;
// when
let (mem_cost, mem_size) = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size).unwrap();
let (mem_cost, new_mem_gas, mem_size) = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size).unwrap();
// then
assert_eq!(mem_cost, 3);
assert_eq!(new_mem_gas, 3);
assert_eq!(mem_size, 32);
}
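The reworked `mem_gas_cost` keeps the gas already paid for memory in `current_mem_gas` and charges only the difference when memory grows, instead of re-deriving the old total from the size each time. A self-contained arithmetic sketch of that idea (plain `u64`, not the generic `CostType` version; it assumes the usual memory_gas of 3 and the quad_coeff_div of 512 asserted in the schedule test later in this diff):

// Gas for a memory size, per the words*memory_gas + words^2/quad_coeff_div formula.
fn gas_for_mem(mem_size_bytes: u64) -> u64 {
    let words = mem_size_bytes >> 5;       // 32-byte words
    words * 3 + ((words * words) >> 9)     // memory_gas = 3, quad_coeff_div = 512 = 2^9
}

fn main() {
    let mut current_mem_gas = 0u64;
    for &required in &[32u64, 64, 1024] {
        let rounded = ((required + 31) >> 5) << 5;                 // round up to a whole word
        let new_mem_gas = gas_for_mem(rounded);
        let charge = new_mem_gas.saturating_sub(current_mem_gas);  // only the delta is charged
        current_mem_gas = new_mem_gas.max(current_mem_gas);
        println!("expand to {} bytes: charge {} gas", rounded, charge);
    }
    // Matches the test above: expanding to 32 bytes from scratch costs 3 gas.
}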

View File

@ -41,6 +41,7 @@ use common::*;
use types::executed::CallType;
use super::instructions::{self, Instruction, InstructionInfo};
use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType};
use bit_set::BitSet;
#[cfg(feature = "evm-debug")]
fn color(instruction: Instruction, name: &'static str) -> String {
@ -115,12 +116,13 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
try!(self.verify_instruction(ext, instruction, &info, &stack));
// Calculate gas cost
let (gas_cost, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, &info, &stack, self.mem.size()));
let (gas_cost, mem_gas, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, &info, &stack, self.mem.size()));
// TODO: make compile-time removable if too much of a performance hit.
let trace_executed = ext.trace_prepare_execute(reader.position - 1, instruction, &gas_cost.as_u256());
try!(gasometer.verify_gas(&gas_cost));
self.mem.expand(mem_size);
gasometer.current_mem_gas = mem_gas;
gasometer.current_gas = gasometer.current_gas - gas_cost;
evm_debug!({
@ -540,10 +542,10 @@ impl<Cost: CostType> Interpreter<Cost> {
}
}
fn verify_jump(&self, jump_u: U256, valid_jump_destinations: &HashSet<usize>) -> evm::Result<usize> {
fn verify_jump(&self, jump_u: U256, valid_jump_destinations: &BitSet) -> evm::Result<usize> {
let jump = jump_u.low_u64() as usize;
if valid_jump_destinations.contains(&jump) && jump_u < U256::from(!0 as usize) {
if valid_jump_destinations.contains(jump) && U256::from(jump) == jump_u {
Ok(jump)
} else {
Err(evm::Error::BadJumpDestination {
@ -765,8 +767,8 @@ impl<Cost: CostType> Interpreter<Cost> {
Ok(())
}
fn find_jump_destinations(&self, code: &[u8]) -> HashSet<CodePosition> {
let mut jump_dests = HashSet::new();
fn find_jump_destinations(&self, code: &[u8]) -> BitSet {
let mut jump_dests = BitSet::with_capacity(code.len());
let mut position = 0;
while position < code.len() {
@ -818,5 +820,5 @@ fn test_find_jump_destinations() {
let valid_jump_destinations = interpreter.find_jump_destinations(&code);
// then
assert!(valid_jump_destinations.contains(&66));
assert!(valid_jump_destinations.contains(66));
}
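Switching `find_jump_destinations` from `HashSet<usize>` to `BitSet` turns membership checks into single bit tests over a compact bitmap indexed by code offset. A sketch of the same scan using the `bit-set` crate added in this diff (the loop body is not shown above, so this is an illustrative reimplementation, not the exact interpreter code):

extern crate bit_set;
use bit_set::BitSet;

const JUMPDEST: u8 = 0x5b;
const PUSH1: u8 = 0x60;
const PUSH32: u8 = 0x7f;

fn find_jump_destinations(code: &[u8]) -> BitSet {
    let mut jump_dests = BitSet::with_capacity(code.len());
    let mut position = 0;
    while position < code.len() {
        let instruction = code[position];
        if instruction == JUMPDEST {
            jump_dests.insert(position);
        } else if instruction >= PUSH1 && instruction <= PUSH32 {
            position += (instruction - PUSH1) as usize + 1; // skip PUSHn immediate data
        }
        position += 1;
    }
    jump_dests
}

fn main() {
    // PUSH1 0x5b (immediate data, not a destination), then a real JUMPDEST at offset 2.
    let code = [PUSH1, 0x5b, JUMPDEST];
    let dests = find_jump_destinations(&code);
    assert!(!dests.contains(1) && dests.contains(2));
}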

View File

@ -35,3 +35,4 @@ pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType};
pub use self::ext::{Ext, ContractCreateResult, MessageCallResult};
pub use self::factory::{Factory, VMType};
pub use self::schedule::Schedule;
pub use types::executed::CallType;

View File

@ -125,7 +125,18 @@ impl Schedule {
tx_create_gas: tcg,
tx_data_zero_gas: 4,
tx_data_non_zero_gas: 68,
copy_gas: 3,
copy_gas: 3,
}
}
}
#[test]
#[cfg(test)]
fn schedule_evm_assumptions() {
let s1 = Schedule::new_frontier();
let s2 = Schedule::new_homestead();
// To optimize division we assume 2**9 for quad_coeff_div
assert_eq!(s1.quad_coeff_div, 512);
assert_eq!(s2.quad_coeff_div, 512);
}

View File

@ -95,6 +95,7 @@ pub extern crate ethstore;
extern crate semver;
extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_devtools as devtools;
extern crate bit_set;
#[cfg(feature = "jit" )] extern crate evmjit;
@ -117,6 +118,7 @@ pub mod snapshot;
pub mod action_params;
#[macro_use] pub mod evm;
mod cache_manager;
mod blooms;
mod db;
mod common;

View File

@ -16,7 +16,8 @@
use std::collections::HashMap;
use std::sync::Arc;
use util::{RwLock, U256, H256};
use std::time::{Instant, Duration};
use util::{Mutex, U256, H256};
/// External miner interface.
pub trait ExternalMinerService: Send + Sync {
@ -25,50 +26,50 @@ pub trait ExternalMinerService: Send + Sync {
/// Total hashrate.
fn hashrate(&self) -> U256;
/// Returns true if external miner is mining.
fn is_mining(&self) -> bool;
}
/// External Miner.
pub struct ExternalMiner {
hashrates: Arc<RwLock<HashMap<H256, U256>>>,
hashrates: Arc<Mutex<HashMap<H256, (Instant, U256)>>>,
}
impl Default for ExternalMiner {
fn default() -> Self {
ExternalMiner {
hashrates: Arc::new(RwLock::new(HashMap::new())),
hashrates: Arc::new(Mutex::new(HashMap::new())),
}
}
}
impl ExternalMiner {
/// Creates new external miner with prefilled hashrates.
pub fn new(hashrates: Arc<RwLock<HashMap<H256, U256>>>) -> Self {
pub fn new(hashrates: Arc<Mutex<HashMap<H256, (Instant, U256)>>>) -> Self {
ExternalMiner {
hashrates: hashrates
hashrates: hashrates,
}
}
}
const ENTRY_TIMEOUT: u64 = 2;
impl ExternalMinerService for ExternalMiner {
fn submit_hashrate(&self, hashrate: U256, id: H256) {
self.hashrates.write().insert(id, hashrate);
self.hashrates.lock().insert(id, (Instant::now() + Duration::from_secs(ENTRY_TIMEOUT), hashrate));
}
fn hashrate(&self) -> U256 {
self.hashrates.read().iter().fold(U256::from(0), |sum, (_, v)| sum + *v)
}
fn is_mining(&self) -> bool {
!self.hashrates.read().is_empty()
let mut hashrates = self.hashrates.lock();
let h = hashrates.drain().filter(|&(_, (t, _))| t > Instant::now()).collect();
*hashrates = h;
hashrates.iter().fold(U256::from(0), |sum, (_, &(_, v))| sum + v)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread::sleep;
use std::time::Duration;
use util::{H256, U256};
fn miner() -> ExternalMiner {
@ -76,16 +77,18 @@ mod tests {
}
#[test]
fn should_return_that_is_mining_if_there_is_at_least_one_entry() {
fn it_should_forget_old_hashrates() {
// given
let m = miner();
assert_eq!(m.is_mining(), false);
assert_eq!(m.hashrate(), U256::from(0));
m.submit_hashrate(U256::from(10), H256::from(1));
assert_eq!(m.hashrate(), U256::from(10));
// when
m.submit_hashrate(U256::from(10), H256::from(1));
sleep(Duration::from_secs(3));
// then
assert_eq!(m.is_mining(), true);
assert_eq!(m.hashrate(), U256::from(0));
}
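The external-miner change above attaches a deadline to every submitted hashrate and prunes expired entries on read, so stale miners stop counting after ENTRY_TIMEOUT seconds. A self-contained sketch of the same pattern (using std types and `retain` rather than the `Mutex` plus `drain`/`filter` in the diff):

use std::collections::HashMap;
use std::time::{Duration, Instant};

fn main() {
    let mut hashrates: HashMap<u64, (Instant, u64)> = HashMap::new();
    // Each submission is stored together with its expiry deadline.
    hashrates.insert(1, (Instant::now() + Duration::from_secs(2), 10));

    // On read, drop anything whose deadline has passed, then sum what is left.
    hashrates.retain(|_, &mut (deadline, _)| deadline > Instant::now());
    let total: u64 = hashrates.values().map(|&(_, rate)| rate).sum();
    println!("aggregate hashrate: {}", total);
}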
#[test]

View File

@ -780,6 +780,10 @@ impl MinerService for Miner {
}
}
fn is_sealing(&self) -> bool {
self.sealing_work.lock().queue.is_in_use()
}
fn map_sealing_work<F, T>(&self, chain: &MiningBlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
trace!(target: "miner", "map_sealing_work: entering");
self.enable_and_prepare_sealing(chain);

View File

@ -150,6 +150,9 @@ pub trait MinerService : Send + Sync {
/// Returns highest transaction nonce for given address.
fn last_nonce(&self, address: &Address) -> Option<U256>;
/// Is it currently sealing?
fn is_sealing(&self) -> bool;
/// Suggested gas price.
fn sensible_gas_price(&self) -> U256 { 20000000000u64.into() }

View File

@ -96,7 +96,7 @@ impl From<ethjson::spec::Account> for PodAccount {
PodAccount {
balance: a.balance.map_or_else(U256::zero, Into::into),
nonce: a.nonce.map_or_else(U256::zero, Into::into),
code: Some(vec![]),
code: a.code.map(Into::into).or(Some(Vec::new())),
storage: BTreeMap::new()
}
}

View File

@ -17,7 +17,7 @@
//! Parameters for a block chain.
use common::*;
use engines::{Engine, NullEngine, BasicAuthority};
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority};
use pod_state::*;
use account_db::*;
use super::genesis::Genesis;
@ -133,6 +133,7 @@ impl Spec {
fn engine(engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>) -> Box<Engine> {
match engine_spec {
ethjson::spec::Engine::Null => Box::new(NullEngine::new(params, builtins)),
ethjson::spec::Engine::InstantSeal => Box::new(InstantSeal::new(params, builtins)),
ethjson::spec::Engine::Ethash(ethash) => Box::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
ethjson::spec::Engine::BasicAuthority(basic_authority) => Box::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
}

View File

@ -167,22 +167,26 @@ impl State {
/// Get the balance of account `a`.
pub fn balance(&self, a: &Address) -> U256 {
self.get(a, false).as_ref().map_or(U256::zero(), |account| *account.balance())
self.ensure_cached(a, false,
|a| a.as_ref().map_or(U256::zero(), |account| *account.balance()))
}
/// Get the nonce of account `a`.
pub fn nonce(&self, a: &Address) -> U256 {
self.get(a, false).as_ref().map_or(U256::zero(), |account| *account.nonce())
self.ensure_cached(a, false,
|a| a.as_ref().map_or(U256::zero(), |account| *account.nonce()))
}
/// Mutate storage of account `address` so that it is `value` for `key`.
pub fn storage_at(&self, address: &Address, key: &H256) -> H256 {
self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key))
self.ensure_cached(address, false,
|a| a.as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key)))
}
/// Mutate storage of account `a` so that it is `value` for `key`.
pub fn code(&self, a: &Address) -> Option<Bytes> {
self.get(a, true).as_ref().map_or(None, |a|a.code().map(|x|x.to_vec()))
self.ensure_cached(a, true,
|a| a.as_ref().map_or(None, |a|a.code().map(|x|x.to_vec())))
}
/// Add `incr` to the balance of account `a`.
@ -306,11 +310,13 @@ impl State {
fn query_pod(&mut self, query: &PodState) {
for (ref address, ref pod_account) in query.get() {
if self.get(address, true).is_some() {
for key in pod_account.storage.keys() {
self.storage_at(address, key);
self.ensure_cached(address, true, |a| {
if a.is_some() {
for key in pod_account.storage.keys() {
self.storage_at(address, key);
}
}
}
});
}
}
@ -323,9 +329,10 @@ impl State {
pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post)
}
/// Pull account `a` in our cache from the trie DB and return it.
/// Ensure account `a` is in our cache of the trie DB and return a handle for getting it.
/// `require_code` requires that the code be cached, too.
fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option<Account> {
fn ensure_cached<'a, F, U>(&'a self, a: &'a Address, require_code: bool, f: F) -> U
where F: FnOnce(&Option<Account>) -> U {
let have_key = self.cache.borrow().contains_key(a);
if !have_key {
let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
@ -336,7 +343,8 @@ impl State {
account.cache_code(&AccountDB::new(self.db.as_hashdb(), a));
}
}
unsafe { ::std::mem::transmute(self.cache.borrow().get(a).unwrap()) }
f(self.cache.borrow().get(a).unwrap())
}
/// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too.
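The closure-based accessor exists to get rid of the `transmute` that previously stretched the borrow of the `RefCell`-backed cache: the borrow now lives only for the duration of the callback, so no lifetime has to be forged. The pattern in isolation, with illustrative names rather than the real `State` internals:

use std::cell::RefCell;
use std::collections::HashMap;

struct Cache {
    inner: RefCell<HashMap<String, Option<u64>>>,
}

impl Cache {
    // Load the entry on a miss, then hand a reference to the caller's closure.
    // The RefCell borrow is scoped to this call, so nothing escapes it.
    fn ensure_cached<F, U>(&self, key: &str, f: F) -> U where F: FnOnce(&Option<u64>) -> U {
        let have_key = self.inner.borrow().contains_key(key);
        if !have_key {
            let loaded = key.parse::<u64>().ok(); // stand-in for the trie lookup
            self.inner.borrow_mut().insert(key.to_owned(), loaded);
        }
        f(self.inner.borrow().get(key).unwrap())
    }
}

fn main() {
    let cache = Cache { inner: RefCell::new(HashMap::new()) };
    let balance = cache.ensure_cached("42", |a| a.as_ref().map_or(0, |v| *v));
    assert_eq!(balance, 42);
}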


@ -261,6 +261,7 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
let batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
bc.commit();
}
db.write(batch).unwrap();
@ -279,6 +280,7 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
let batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
bc.commit();
}
db.write(batch).unwrap();


@ -68,8 +68,10 @@ pub struct Config {
pub enabled: Switch,
/// Traces blooms configuration.
pub blooms: BloomConfig,
/// Database cache-size if not default
pub db_cache_size: Option<usize>,
/// Preferred cache-size.
pub pref_cache_size: usize,
/// Max cache-size.
pub max_cache_size: usize,
}
impl Default for Config {
@ -80,7 +82,8 @@ impl Default for Config {
levels: 3,
elements_per_index: 16,
},
db_cache_size: None,
pref_cache_size: 15 * 1024 * 1024,
max_cache_size: 20 * 1024 * 1024,
}
}
}


@ -20,14 +20,14 @@ use std::collections::HashMap;
use std::sync::Arc;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DBTransaction, RwLock};
use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf};
use header::BlockNumber;
use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
use db::{Key, Writable, Readable, CacheUpdatePolicy};
use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use client::DB_COL_TRACE;
use cache_manager::CacheManager;
const TRACE_DB_VER: &'static [u8] = b"1.0";
@ -62,6 +62,12 @@ impl From<GroupPosition> for TraceGroupPosition {
}
}
impl HeapSizeOf for TraceGroupPosition {
fn heap_size_of_children(&self) -> usize {
0
}
}
/// Helper data structure created because [u8; 6] does not implement Deref to &[u8].
pub struct TraceGroupKey([u8; 6]);
@ -88,11 +94,18 @@ impl Key<blooms::BloomGroup> for TraceGroupPosition {
}
}
#[derive(Debug, Hash, Eq, PartialEq)]
enum CacheID {
Trace(H256),
Bloom(TraceGroupPosition),
}
/// Trace database.
pub struct TraceDB<T> where T: DatabaseExtras {
// cache
traces: RwLock<HashMap<H256, FlatBlockTraces>>,
blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>,
cache_manager: RwLock<CacheManager<CacheID>>,
// db
tracesdb: Arc<Database>,
// config,
@ -106,6 +119,7 @@ pub struct TraceDB<T> where T: DatabaseExtras {
impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> {
let position = TraceGroupPosition::from(position.clone());
self.note_used(CacheID::Bloom(position.clone()));
self.tracesdb.read_with_cache(DB_COL_TRACE, &self.blooms, &position).map(Into::into)
}
}
@ -136,6 +150,7 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
let db = TraceDB {
traces: RwLock::new(HashMap::new()),
blooms: RwLock::new(HashMap::new()),
cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
tracesdb: tracesdb,
bloom_config: config.blooms,
enabled: enabled,
@ -145,8 +160,39 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
Ok(db)
}
fn cache_size(&self) -> usize {
let traces = self.traces.read().heap_size_of_children();
let blooms = self.blooms.read().heap_size_of_children();
traces + blooms
}
/// Let the cache system know that a cacheable item has been used.
fn note_used(&self, id: CacheID) {
let mut cache_manager = self.cache_manager.write();
cache_manager.note_used(id);
}
/// Ticks our cache system and throws out any old data.
pub fn collect_garbage(&self) {
let mut cache_manager = self.cache_manager.write();
cache_manager.collect_garbage(|| self.cache_size(), | ids | {
let mut traces = self.traces.write();
let mut blooms = self.blooms.write();
for id in &ids {
match *id {
CacheID::Trace(ref h) => { traces.remove(h); },
CacheID::Bloom(ref h) => { blooms.remove(h); },
}
}
traces.shrink_to_fit();
blooms.shrink_to_fit();
});
}
/// Returns traces for block with hash.
fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> {
self.note_used(CacheID::Trace(block_hash.clone()));
self.tracesdb.read_with_cache(DB_COL_TRACE, &self.traces, block_hash)
}
@ -218,6 +264,8 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
// at first, let's insert new block traces
{
// note_used must be called before locking traces to avoid cache/traces deadlock on garbage collection
self.note_used(CacheID::Trace(request.block_hash.clone()));
let mut traces = self.traces.write();
// it's important to use overwrite here,
// cause this value might be queried by hash later
@ -247,6 +295,9 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
.collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>();
let mut blooms = self.blooms.write();
for key in blooms_to_insert.keys() {
self.note_used(CacheID::Bloom(key.clone()));
}
batch.extend_with_cache(DB_COL_TRACE, blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
}
}
@ -373,6 +424,7 @@ mod tests {
}
}
#[derive(Clone)]
struct Extras {
block_hashes: HashMap<BlockNumber, H256>,
transaction_hashes: HashMap<BlockNumber, Vec<H256>>,
@ -600,4 +652,36 @@ mod tests {
assert_eq!(tracedb.trace(0, 0, vec![]).unwrap(), create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));
assert_eq!(tracedb.trace(1, 0, vec![]).unwrap(), create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));
}
#[test]
fn query_trace_after_reopen() {
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default();
let mut extras = Extras::default();
let block_0 = H256::from(0xa1);
let tx_0 = H256::from(0xff);
extras.block_hashes.insert(0, block_0.clone());
extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
// set tracing on
config.enabled = Switch::On;
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap();
// import block 0
let request = create_simple_import_request(0, block_0.clone());
let batch = DBTransaction::new(&db);
tracedb.import(&batch, request);
db.write(batch).unwrap();
}
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap();
let traces = tracedb.transaction_traces(0, 0);
assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]);
}
}
}
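Every cache read above is routed through `note_used`, and `collect_garbage` asks for the current heap size and evicts the least recently used ids until the size drops back under the preferred limit. A much-simplified stand-in for that manager, showing only the shape of the API (the real `cache_manager::CacheManager` lives elsewhere in the crate and also takes a max size and batch size):

use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};

struct CacheManager<T> {
    pref_size: usize,
    recently_used: VecDeque<T>,
}

impl<T: PartialEq> CacheManager<T> {
    fn new(pref_size: usize) -> Self {
        CacheManager { pref_size: pref_size, recently_used: VecDeque::new() }
    }

    // Move `id` to the back of the usage queue (most recently used).
    fn note_used(&mut self, id: T) {
        self.recently_used.retain(|x| *x != id);
        self.recently_used.push_back(id);
    }

    // While the reported size exceeds the preferred size, pop the least recently
    // used id and let the caller drop it from its own maps.
    fn collect_garbage<S, N>(&mut self, mut current_size: S, mut notify_unused: N)
        where S: FnMut() -> usize, N: FnMut(Vec<T>) {
        while current_size() > self.pref_size {
            match self.recently_used.pop_front() {
                Some(id) => notify_unused(vec![id]),
                None => break,
            }
        }
    }
}

fn main() {
    let traces: RefCell<HashMap<u32, String>> = RefCell::new(HashMap::new());
    let mut manager = CacheManager::new(8);
    for id in 0..4u32 {
        traces.borrow_mut().insert(id, "trace".to_owned());
        manager.note_used(id);
    }
    // Evict until the "heap size" (total string length here) is back under 8 bytes.
    manager.collect_garbage(
        || traces.borrow().values().map(|v| v.len()).sum(),
        |ids| { let mut t = traces.borrow_mut(); for id in ids { t.remove(&id); } });
    assert!(traces.borrow().len() < 4);
}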


@ -17,4 +17,4 @@
//! Types used in the public api
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/types.rs"));
include!(concat!(env!("OUT_DIR"), "/mod.rs.in"));


@ -20,6 +20,7 @@ use std::collections::VecDeque;
use std::mem;
use ipc::binary::BinaryConvertError;
use util::rlp::*;
use util::HeapSizeOf;
use basic_types::LogBloom;
use super::trace::{Action, Res};
@ -47,6 +48,12 @@ impl FlatTrace {
}
}
impl HeapSizeOf for FlatTrace {
fn heap_size_of_children(&self) -> usize {
self.trace_address.heap_size_of_children()
}
}
impl Encodable for FlatTrace {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(4);
@ -82,6 +89,12 @@ impl From<Vec<FlatTrace>> for FlatTransactionTraces {
}
}
impl HeapSizeOf for FlatTransactionTraces {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl FlatTransactionTraces {
/// Returns bloom of all traces in the collection.
pub fn bloom(&self) -> LogBloom {
@ -111,6 +124,12 @@ impl Into<Vec<FlatTrace>> for FlatTransactionTraces {
#[derive(Debug, PartialEq, Clone)]
pub struct FlatBlockTraces(Vec<FlatTransactionTraces>);
impl HeapSizeOf for FlatBlockTraces {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl From<Vec<FlatTransactionTraces>> for FlatBlockTraces {
fn from(v: Vec<FlatTransactionTraces>) -> Self {
FlatBlockTraces(v)
@ -145,31 +164,63 @@ impl Into<Vec<FlatTransactionTraces>> for FlatBlockTraces {
#[cfg(test)]
mod tests {
use super::{FlatBlockTraces, FlatTransactionTraces, FlatTrace};
use trace::trace::{Action, Res, CallResult, Call};
use trace::trace::{Action, Res, CallResult, Call, Suicide};
use types::executed::CallType;
#[test]
fn test_trace_serialization() {
use util::rlp;
// block #51921
let flat_trace = FlatTrace {
action: Action::Call(Call {
from: 1.into(),
to: 2.into(),
value: 3.into(),
gas: 4.into(),
input: vec![0x5],
from: "8dda5e016e674683241bf671cced51e7239ea2bc".parse().unwrap(),
to: "37a5e19cc2d49f244805d5c268c0e6f321965ab9".parse().unwrap(),
value: "3627e8f712373c0000".parse().unwrap(),
gas: 0x03e8.into(),
input: vec![],
call_type: CallType::Call,
}),
result: Res::Call(CallResult {
gas_used: 10.into(),
output: vec![0x11, 0x12]
gas_used: 0.into(),
output: vec![],
}),
trace_address: Default::default(),
subtraces: 0,
};
let block_traces = FlatBlockTraces(vec![FlatTransactionTraces(vec![flat_trace])]);
let flat_trace1 = FlatTrace {
action: Action::Call(Call {
from: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(),
to: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(),
value: 0.into(),
gas: 0x010c78.into(),
input: vec![0x41, 0xc0, 0xe1, 0xb5],
call_type: CallType::Call,
}),
result: Res::Call(CallResult {
gas_used: 0x0127.into(),
output: vec![],
}),
trace_address: Default::default(),
subtraces: 1,
};
let flat_trace2 = FlatTrace {
action: Action::Suicide(Suicide {
address: "412fda7643b37d436cb40628f6dbbb80a07267ed".parse().unwrap(),
balance: 0.into(),
refund_address: "3d0768da09ce77d25e2d998e6a7b6ed4b9116c2d".parse().unwrap(),
}),
result: Res::None,
trace_address: vec![0].into_iter().collect(),
subtraces: 0,
};
let block_traces = FlatBlockTraces(vec![
FlatTransactionTraces(vec![flat_trace]),
FlatTransactionTraces(vec![flat_trace1, flat_trace2])
]);
let encoded = rlp::encode(&block_traces);
let decoded = rlp::decode(&encoded);


@ -252,7 +252,7 @@ impl Decodable for Suicide {
let res = Suicide {
address: try!(d.val_at(0)),
refund_address: try!(d.val_at(1)),
balance: try!(d.val_at(3)),
balance: try!(d.val_at(2)),
};
Ok(res)
@ -298,7 +298,7 @@ impl Decodable for Action {
match action_type {
0 => d.val_at(1).map(Action::Call),
1 => d.val_at(1).map(Action::Create),
2 => d.val_at(2).map(Action::Suicide),
2 => d.val_at(1).map(Action::Suicide),
_ => Err(DecoderError::Custom("Invalid action type.")),
}
}
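Both corrections are RLP index fixes: a `Suicide` record is a three-item list, so `address`, `refund_address` and `balance` are read from indices 0, 1 and 2 (there is no index 3), and an `Action` is a two-item list of type tag plus payload, so the payload of every variant, including `Suicide`, sits at index 1.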


@ -138,7 +138,7 @@ fn execute<S, I>(command: I) -> Result<String, Error> where I: IntoIterator<Item
let ok = store.change_password(&address, &old_pwd, &new_pwd).is_ok();
Ok(format!("{}", ok))
} else if args.cmd_list {
let accounts = store.accounts();
let accounts = try!(store.accounts());
Ok(format_accounts(&accounts))
} else if args.cmd_import {
let src = try!(key_dir(&args.flag_src));


@ -68,7 +68,7 @@ pub mod aes {
use rcrypto::blockmodes::{CtrMode, CbcDecryptor, PkcsPadding};
use rcrypto::aessafe::{AesSafe128Encryptor, AesSafe128Decryptor};
use rcrypto::symmetriccipher::{Encryptor, Decryptor, SymmetricCipherError};
use rcrypto::buffer::{RefReadBuffer, RefWriteBuffer};
use rcrypto::buffer::{RefReadBuffer, RefWriteBuffer, WriteBuffer};
/// Encrypt a message
pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) {
@ -83,10 +83,12 @@ pub mod aes {
}
/// Decrypt a message using cbc mode
pub fn decrypt_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<(), SymmetricCipherError> {
pub fn decrypt_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<usize, SymmetricCipherError> {
let mut encryptor = CbcDecryptor::new(AesSafe128Decryptor::new(k), PkcsPadding, iv.to_vec());
try!(encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true));
Ok(())
let len = dest.len();
let mut buffer = RefWriteBuffer::new(dest);
try!(encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut buffer, true));
Ok(len - buffer.remaining())
}
}
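Returning the number of bytes written lets callers take only the unpadded plaintext out of the oversized output buffer. A possible helper on top of the function above (the name and placement are illustrative, not part of the crate; it assumes the same `SymmetricCipherError` import):

/// Decrypt `encrypted` and return only the bytes that were actually produced.
fn decrypt_cbc_unpadded(k: &[u8], iv: &[u8], encrypted: &[u8]) -> Result<Vec<u8>, SymmetricCipherError> {
    let mut dest = vec![0u8; encrypted.len()];
    let len = try!(decrypt_cbc(k, iv, encrypted, &mut dest));
    dest.truncate(len); // bytes past `len` were never written, so the padding is gone
    Ok(dest)
}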


@ -16,6 +16,7 @@
use std::collections::BTreeMap;
use std::sync::RwLock;
use std::mem;
use ethkey::KeyPair;
use crypto::KEY_ITERATIONS;
use random::Random;
@ -56,6 +57,26 @@ impl EthStore {
cache.insert(account.address.clone(), account);
Ok(())
}
fn reload_accounts(&self) -> Result<(), Error> {
let mut cache = self.cache.write().unwrap();
let accounts = try!(self.dir.load());
let new_accounts: BTreeMap<_, _> = accounts.into_iter().map(|account| (account.address.clone(), account)).collect();
mem::replace(&mut *cache, new_accounts);
Ok(())
}
fn get(&self, address: &Address) -> Result<SafeAccount, Error> {
{
let cache = self.cache.read().unwrap();
if let Some(account) = cache.get(address) {
return Ok(account.clone())
}
}
try!(self.reload_accounts());
let cache = self.cache.read().unwrap();
cache.get(address).cloned().ok_or(Error::InvalidAccount)
}
}
impl SecretStore for EthStore {
@ -68,17 +89,15 @@ impl SecretStore for EthStore {
Ok(address)
}
fn accounts(&self) -> Vec<Address> {
self.cache.read().unwrap().keys().cloned().collect()
fn accounts(&self) -> Result<Vec<Address>, Error> {
try!(self.reload_accounts());
Ok(self.cache.read().unwrap().keys().cloned().collect())
}
fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> {
// change password
let account = {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(address).ok_or(Error::InvalidAccount));
try!(account.change_password(old_password, new_password, self.iterations))
};
let account = try!(self.get(address));
let account = try!(account.change_password(old_password, new_password, self.iterations));
// save to file
self.save(account)
@ -86,8 +105,7 @@ impl SecretStore for EthStore {
fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> {
let can_remove = {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(address).ok_or(Error::InvalidAccount));
let account = try!(self.get(address));
account.check_password(password)
};
@ -101,50 +119,38 @@ impl SecretStore for EthStore {
}
}
fn sign(&self, account: &Address, password: &str, message: &Message) -> Result<Signature, Error> {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(account).ok_or(Error::InvalidAccount));
fn sign(&self, address: &Address, password: &str, message: &Message) -> Result<Signature, Error> {
let account = try!(self.get(address));
account.sign(password, message)
}
fn uuid(&self, addr: &Address) -> Result<UUID, Error> {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(addr).ok_or(Error::InvalidAccount));
fn uuid(&self, address: &Address) -> Result<UUID, Error> {
let account = try!(self.get(address));
Ok(account.id.into())
}
fn name(&self, addr: &Address) -> Result<String, Error> {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(addr).ok_or(Error::InvalidAccount));
fn name(&self, address: &Address) -> Result<String, Error> {
let account = try!(self.get(address));
Ok(account.name.clone())
}
fn meta(&self, addr: &Address) -> Result<String, Error> {
let cache = self.cache.read().unwrap();
let account = try!(cache.get(addr).ok_or(Error::InvalidAccount));
fn meta(&self, address: &Address) -> Result<String, Error> {
let account = try!(self.get(address));
Ok(account.meta.clone())
}
fn set_name(&self, addr: &Address, name: String) -> Result<(), Error> {
let account = {
let cache = self.cache.read().unwrap();
let mut account = try!(cache.get(addr).ok_or(Error::InvalidAccount)).clone();
account.name = name;
account
};
fn set_name(&self, address: &Address, name: String) -> Result<(), Error> {
let mut account = try!(self.get(address));
account.name = name;
// save to file
self.save(account)
}
fn set_meta(&self, addr: &Address, meta: String) -> Result<(), Error> {
let account = {
let cache = self.cache.read().unwrap();
let mut account = try!(cache.get(addr).ok_or(Error::InvalidAccount)).clone();
account.meta = meta;
account
};
fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error> {
let mut account = try!(self.get(address));
account.meta = meta;
// save to file
self.save(account)
}
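The new private `get` is a read-through lookup: check the in-memory cache, reload the whole key directory on a miss, then check once more before reporting `InvalidAccount`. Stripped of the keystore specifics, the shape is roughly this (illustrative names, std-only):

use std::collections::BTreeMap;
use std::sync::RwLock;

struct Store {
    cache: RwLock<BTreeMap<String, String>>,
}

impl Store {
    // Stand-in for reading the key files from disk.
    fn load_from_disk(&self) -> BTreeMap<String, String> {
        let mut m = BTreeMap::new();
        m.insert("addr".to_owned(), "account".to_owned());
        m
    }

    fn reload(&self) {
        let mut cache = self.cache.write().unwrap();
        *cache = self.load_from_disk();
    }

    fn get(&self, key: &str) -> Option<String> {
        {
            let cache = self.cache.read().unwrap();
            if let Some(v) = cache.get(key) {
                return Some(v.clone());
            }
        } // read lock released before reloading
        self.reload();
        self.cache.read().unwrap().get(key).cloned()
    }
}

fn main() {
    let store = Store { cache: RwLock::new(BTreeMap::new()) };
    // Cache starts empty, so the first lookup falls back to a disk reload.
    assert_eq!(store.get("addr").as_ref().map(|s| s.as_str()), Some("account"));
}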


@ -109,4 +109,3 @@ macro_rules! impl_hash {
impl_hash!(H128, 16);
impl_hash!(H160, 20);
impl_hash!(H256, 32);
impl_hash!(H768, 96);


@ -11,10 +11,10 @@ mod version;
pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr};
pub use self::crypto::Crypto;
pub use self::error::Error;
pub use self::hash::{H128, H160, H256, H768};
pub use self::hash::{H128, H160, H256};
pub use self::id::UUID;
pub use self::kdf::{Kdf, KdfSer, Prf, Pbkdf2, Scrypt, KdfSerParams};
pub use self::key_file::KeyFile;
pub use self::presale::PresaleWallet;
pub use self::presale::{PresaleWallet, Encseed};
pub use self::version::Version;


@ -1,10 +1,34 @@
use std::io::Read;
use std::ops::Deref;
use serde_json;
use super::{H160, H768};
use serde::{Deserialize, Deserializer, Error};
use rustc_serialize::hex::FromHex;
use super::{H160};
#[derive(Debug, PartialEq)]
pub struct Encseed(Vec<u8>);
impl Deref for Encseed {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Deserialize for Encseed {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: Deserializer
{
let s = try!(String::deserialize(deserializer));
let data = try!(s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e))));
Ok(Encseed(data))
}
}
#[derive(Debug, PartialEq, Deserialize)]
pub struct PresaleWallet {
pub encseed: H768,
pub encseed: Encseed,
#[serde(rename = "ethaddr")]
pub address: H160,
}
@ -19,7 +43,8 @@ impl PresaleWallet {
mod tests {
use std::str::FromStr;
use serde_json;
use json::{PresaleWallet, H160, H768};
use rustc_serialize::hex::FromHex;
use json::{PresaleWallet, H160, Encseed};
#[test]
fn presale_wallet() {
@ -32,7 +57,27 @@ mod tests {
} "#;
let expected = PresaleWallet {
encseed: H768::from_str("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066").unwrap(),
encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".from_hex().unwrap()),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
};
let wallet: PresaleWallet = serde_json::from_str(json).unwrap();
assert_eq!(expected, wallet);
}
#[test]
fn long_presale_wallet() {
let json = r#"
{
"encseed":
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d",
"ethaddr": "ede84640d1a1d3e06902048e67aa7db8d52c2ce1",
"email": "123@gmail.com",
"btcaddr": "1JvqEc6WLhg6GnyrLBe2ztPAU28KRfuseH"
} "#;
let expected = PresaleWallet {
encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".from_hex().unwrap()),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
};


@ -10,7 +10,7 @@ use {crypto, Error};
pub struct PresaleWallet {
iv: [u8; 16],
ciphertext: [u8; 80],
ciphertext: Vec<u8>,
address: Address,
}
@ -19,8 +19,8 @@ impl From<json::PresaleWallet> for PresaleWallet {
let mut iv = [0u8; 16];
iv.copy_from_slice(&wallet.encseed[..16]);
let mut ciphertext = [0u8; 80];
ciphertext.copy_from_slice(&wallet.encseed[16..]);
let mut ciphertext = vec![];
ciphertext.extend_from_slice(&wallet.encseed[16..]);
PresaleWallet {
iv: iv,
@ -42,10 +42,11 @@ impl PresaleWallet {
let mut derived_key = vec![0u8; 16];
pbkdf2(&mut h_mac, password.as_bytes(), 2000, &mut derived_key);
let mut key = [0u8; 64];
try!(crypto::aes::decrypt_cbc(&derived_key, &self.iv, &self.ciphertext, &mut key).map_err(|_| Error::InvalidPassword));
let mut key = vec![0; self.ciphertext.len()];
let len = try!(crypto::aes::decrypt_cbc(&derived_key, &self.iv, &self.ciphertext, &mut key).map_err(|_| Error::InvalidPassword));
let unpadded = &key[..len];
let secret = Secret::from(key.keccak256());
let secret = Secret::from(unpadded.keccak256());
if let Ok(kp) = KeyPair::from_secret(secret) {
if kp.address() == self.address {
return Ok(kp)


@ -21,7 +21,7 @@ use json::UUID;
pub trait SecretStore: Send + Sync {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error>;
fn accounts(&self) -> Vec<Address>;
fn accounts(&self) -> Result<Vec<Address>, Error>;
fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>;


@ -46,11 +46,11 @@ fn random_secret() -> Secret {
fn secret_store_create_account() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().len(), 0);
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store.insert_account(random_secret(), "").is_ok());
assert_eq!(store.accounts().len(), 1);
assert_eq!(store.accounts().unwrap().len(), 1);
assert!(store.insert_account(random_secret(), "").is_ok());
assert_eq!(store.accounts().len(), 2);
assert_eq!(store.accounts().unwrap().len(), 2);
}
#[test]
@ -58,7 +58,7 @@ fn secret_store_sign() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(random_secret(), "").is_ok());
let accounts = store.accounts();
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.sign(&accounts[0], "", &Default::default()).is_ok());
assert!(store.sign(&accounts[0], "1", &Default::default()).is_err());
@ -69,7 +69,7 @@ fn secret_store_change_password() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(random_secret(), "").is_ok());
let accounts = store.accounts();
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.sign(&accounts[0], "", &Default::default()).is_ok());
assert!(store.change_password(&accounts[0], "", "1").is_ok());
@ -82,10 +82,10 @@ fn secret_store_remove_account() {
let dir = TransientDir::create().unwrap();
let store = EthStore::open(Box::new(dir)).unwrap();
assert!(store.insert_account(random_secret(), "").is_ok());
let accounts = store.accounts();
let accounts = store.accounts().unwrap();
assert_eq!(accounts.len(), 1);
assert!(store.remove_account(&accounts[0], "").is_ok());
assert_eq!(store.accounts().len(), 0);
assert_eq!(store.accounts().unwrap().len(), 0);
assert!(store.remove_account(&accounts[0], "").is_err());
}
@ -107,7 +107,7 @@ fn pat_path() -> &'static str {
fn secret_store_laod_geth_files() {
let dir = DiskDirectory::at(test_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts(), vec![
assert_eq!(store.accounts().unwrap(), vec![
Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(),
Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(),
Address::from_str("63121b431a52f8043c16fcf0d1df9cb7b5f66649").unwrap(),
@ -118,7 +118,7 @@ fn secret_store_laod_geth_files() {
fn secret_store_load_pat_files() {
let dir = DiskDirectory::at(pat_path());
let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts(), vec![
assert_eq!(store.accounts().unwrap(), vec![
Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(),
Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(),
]);

evmbin/Cargo.lock (generated)

@ -47,6 +47,19 @@ dependencies = [
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-set"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-vec"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "0.3.3"
@ -168,6 +181,7 @@ dependencies = [
name = "ethcore"
version = "1.3.0"
dependencies = [
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -253,7 +267,7 @@ dependencies = [
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb 0.4.5",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -738,14 +752,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rocksdb"
version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
]
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",


@ -19,7 +19,7 @@
use std::collections::HashMap;
use util::{U256, H256, Address, Bytes, FixedHash};
use ethcore::client::EnvInfo;
use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule};
use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule, CallType};
pub struct FakeExt {
schedule: Schedule,
@ -67,7 +67,8 @@ impl Ext for FakeExt {
_value: Option<U256>,
_data: &[u8],
_code_address: &Address,
_output: &mut [u8]) -> MessageCallResult {
_output: &mut [u8],
_call_type: CallType) -> MessageCallResult {
unimplemented!();
}


@ -95,3 +95,58 @@ pub fn register(reg: &mut rustc_plugin::Registry) {
reg.register_attribute("ipc".to_owned(), AttributeType::Normal);
}
#[derive(Debug)]
pub enum Error { InvalidFileName, ExpandFailure }
pub fn derive_ipc(src_path: &str) -> Result<(), Error> {
use std::env;
use std::path::{Path, PathBuf};
let out_dir = env::var_os("OUT_DIR").unwrap();
let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned()));
let mut intermediate_file_name = file_name.clone();
intermediate_file_name.push_str(".rpc.in");
let intermediate_path = Path::new(&out_dir).join(&intermediate_file_name);
let final_path = Path::new(&out_dir).join(&file_name);
{
let mut registry = syntex::Registry::new();
register(&mut registry);
if let Err(_) = registry.expand("", &Path::new(src_path), &intermediate_path) {
// will be reported by compiler
return Err(Error::ExpandFailure)
}
}
{
let mut registry = syntex::Registry::new();
register(&mut registry);
if let Err(_) = registry.expand("", &intermediate_path, &final_path) {
// will be reported by compiler
return Err(Error::ExpandFailure)
}
}
Ok(())
}
pub fn derive_binary(src_path: &str) -> Result<(), Error> {
use std::env;
use std::path::{Path, PathBuf};
let out_dir = env::var_os("OUT_DIR").unwrap();
let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned()));
let final_path = Path::new(&out_dir).join(&file_name);
let mut registry = syntex::Registry::new();
register(&mut registry);
if let Err(_) = registry.expand("", &Path::new(src_path), &final_path) {
// will be reported by compiler
return Err(Error::ExpandFailure)
}
Ok(())
}
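A consuming crate's build script then shrinks to one call per annotated source file, which is exactly the shape the hypervisor and IPC test crates adopt further down:

// build.rs of a crate using the codegen (sketch; the path depends on the crate).
extern crate ethcore_ipc_codegen;

fn main() {
    ethcore_ipc_codegen::derive_ipc("src/service.rs.in").unwrap();
}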


@ -15,5 +15,4 @@ semver = "0.2"
log = "0.3"
[build-dependencies]
syntex = "*"
ethcore-ipc-codegen = { path = "../codegen" }


@ -14,30 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate syntex;
extern crate ethcore_ipc_codegen as codegen;
use std::env;
use std::path::Path;
extern crate ethcore_ipc_codegen;
fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
// ipc pass
{
let src = Path::new("src/service.rs.in");
let dst = Path::new(&out_dir).join("hypervisor_service_ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
// serialization pass
{
let src = Path::new(&out_dir).join("hypervisor_service_ipc.rs");
let dst = Path::new(&out_dir).join("hypervisor_service_cg.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
ethcore_ipc_codegen::derive_ipc("src/service.rs.in").unwrap();
}


@ -17,4 +17,4 @@
//! Parity interprocess hypervisor IPC service
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/hypervisor_service_cg.rs"));
include!(concat!(env!("OUT_DIR"), "/service.rs.in"));


@ -7,7 +7,6 @@ license = "GPL-3.0"
[features]
[dependencies]
jsonrpc-core = "2.0"
ethcore-ipc = { path = "../rpc" }
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
log = "0.3"


@ -19,14 +19,11 @@
extern crate ethcore_ipc as ipc;
extern crate nanomsg;
#[macro_use] extern crate log;
extern crate jsonrpc_core;
use jsonrpc_core::IoHandler;
pub use ipc::{WithSocket, IpcInterface, IpcConfig};
pub use nanomsg::Socket as NanoSocket;
use std::sync::*;
use std::sync::atomic::*;
use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut};
use std::ops::Deref;
@ -218,149 +215,14 @@ impl<S: ?Sized> Worker<S> where S: IpcInterface {
}
}
/// Error in handling JSON RPC request
pub enum IoHandlerError {
BadRequest,
HandlerError,
}
/// Worker to handle JSON RPC requests
pub struct IoHandlerWorker {
handler: Arc<IoHandler>,
socket: Socket,
_endpoint: Endpoint,
poll: Vec<PollFd>,
buf: Vec<u8>,
}
/// IPC server for json-rpc handler (single thread)
pub struct IoHandlerServer {
is_stopping: Arc<AtomicBool>,
is_stopped: Arc<AtomicBool>,
handler: Arc<IoHandler>,
socket_addr: String,
}
impl IoHandlerServer {
/// New IPC server for JSON RPC `handler` and ipc socket address `socket_addr`
pub fn new(handler: &Arc<IoHandler>, socket_addr: &str) -> IoHandlerServer {
IoHandlerServer {
handler: handler.clone(),
is_stopping: Arc::new(AtomicBool::new(false)),
is_stopped: Arc::new(AtomicBool::new(true)),
socket_addr: socket_addr.to_owned(),
}
}
/// IPC Server starts (non-blocking, in separate thread)
pub fn start(&self) -> Result<(), SocketError> {
let mut worker = try!(IoHandlerWorker::new(&self.handler, &self.socket_addr));
self.is_stopping.store(false, Ordering::Relaxed);
let worker_is_stopping = self.is_stopping.clone();
let worker_is_stopped = self.is_stopped.clone();
::std::thread::spawn(move || {
worker_is_stopped.store(false, Ordering::Relaxed);
while !worker_is_stopping.load(Ordering::Relaxed) {
worker.poll()
}
worker_is_stopped.store(true, Ordering::Relaxed);
});
Ok(())
}
/// IPC server stop (func will wait until effective stop)
pub fn stop(&self) {
self.is_stopping.store(true, Ordering::Relaxed);
while !self.is_stopped.load(Ordering::Relaxed) {
std::thread::sleep(std::time::Duration::from_millis(50));
}
}
}
impl Drop for IoHandlerServer {
fn drop(&mut self) {
self.stop()
}
}
impl IoHandlerWorker {
pub fn new(handler: &Arc<IoHandler>, socket_addr: &str) -> Result<IoHandlerWorker, SocketError> {
let mut socket = try!(Socket::new(Protocol::Rep).map_err(|e| {
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
SocketError::RequestLink
}));
let endpoint = try!(socket.bind(socket_addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
SocketError::RequestLink
}));
let poll = vec![socket.new_pollfd(PollInOut::In)];
Ok(IoHandlerWorker {
handler: handler.clone(),
socket: socket,
_endpoint: endpoint,
poll: poll,
buf: Vec::with_capacity(1024),
})
}
pub fn poll(&mut self) {
let mut request = PollRequest::new(&mut self.poll[..]);
let _result_guard = Socket::poll(&mut request, POLL_TIMEOUT);
let fd = request.get_fds()[0]; // guaranteed to exist and be the only one
// because contains only immutable socket field as a member
if !fd.can_read() {
return;
}
unsafe { self.buf.set_len(0); }
match self.socket.nb_read_to_end(&mut self.buf) {
Ok(0) => {
warn!(target: "ipc", "RPC empty message received");
return;
},
Ok(_) => {
let rpc_msg = match String::from_utf8(self.buf.clone()) {
Ok(val) => val,
Err(e) => {
warn!(target: "ipc", "RPC decoding error (utf-8): {:?}", e);
return;
}
};
let response: Option<String> = self.handler.handle_request(&rpc_msg);
if let Some(response_str) = response {
let response_bytes = response_str.into_bytes();
if let Err(e) = self.socket.nb_write(&response_bytes) {
warn!(target: "ipc", "Failed to write response: {:?}", e);
}
}
},
Err(Error::TryAgain) => {
// no data
},
Err(x) => {
warn!(target: "ipc", "Error polling connections {:?}", x);
panic!("IPC RPC fatal error");
},
}
}
}
#[cfg(test)]
mod service_tests {
use super::{Worker, IoHandlerServer};
use super::Worker;
use ipc::*;
use std::io::{Read, Write};
use std::sync::{Arc, RwLock};
use nanomsg::{Socket, Protocol, Endpoint};
use jsonrpc_core;
use jsonrpc_core::{IoHandler, Value, Params, MethodCommand};
struct TestInvoke {
method_num: u16,
@ -377,7 +239,7 @@ mod service_tests {
}
}
impl IpcInterface<DummyService> for DummyService {
impl IpcInterface for DummyService {
fn dispatch<R>(&self, _r: &mut R) -> Vec<u8> where R: Read {
vec![]
}
@ -400,15 +262,6 @@ mod service_tests {
(socket, endpoint)
}
fn dummy_request(addr: &str, buf: &[u8]) -> Vec<u8> {
let mut socket = Socket::new(Protocol::Req).unwrap();
let _endpoint = socket.connect(addr).unwrap();
socket.write(buf).unwrap();
let mut buf = Vec::new();
socket.read_to_end(&mut buf).unwrap();
buf
}
#[test]
fn can_create_worker() {
let worker = Worker::<DummyService>::new(&Arc::new(DummyService::new()));
@ -462,29 +315,4 @@ mod service_tests {
assert_eq!(0, worker.service.methods_stack.read().unwrap()[0].method_num);
assert_eq!(vec![0u8; 1024*1024-2], worker.service.methods_stack.read().unwrap()[0].params);
}
#[test]
fn test_jsonrpc_handler() {
let url = "ipc:///tmp/parity-test50.ipc";
struct SayHello;
impl MethodCommand for SayHello {
fn execute(&self, _params: Params) -> Result<Value, jsonrpc_core::Error> {
Ok(Value::String("hello".to_string()))
}
}
let io = Arc::new(IoHandler::new());
io.add_method("say_hello", SayHello);
let request = r#"{"jsonrpc": "2.0", "method": "say_hello", "params": [42, 23], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"hello","id":1}"#;
let server = IoHandlerServer::new(&io, url);
server.start().unwrap();
assert_eq!(String::from_utf8(dummy_request(url, request.as_bytes())).unwrap(), response.to_string());
server.stop();
}
}


@ -10,10 +10,11 @@ path = "run.rs"
[dependencies]
ethcore-ipc = { path = "../rpc" }
ethcore-devtools = { path = "../../devtools" }
semver = "0.2.0"
semver = "0.2"
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
ethcore-ipc-nano = { path = "../nano" }
ethcore-util = { path = "../../util" }
log = "0.3"
[build-dependencies]
syntex = "0.33"


@ -16,4 +16,4 @@
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/binary.rs"));
include!(concat!(env!("OUT_DIR"), "/binary.rs.in"));


@ -56,3 +56,11 @@ fn opt_two_vec() {
let serialized = ::ipc::binary::serialize(&example).unwrap();
assert_eq!(serialized, vec![0u8; 16]);
}
#[test]
fn enum_with_struct() {
let example = EnumWithStruct::Right { how_much: 15 };
let serialized = ::ipc::binary::serialize(&example).unwrap();
let deserialized = ::ipc::binary::deserialize::<EnumWithStruct>(&serialized).unwrap();
assert_eq!(example, deserialized);
}


@ -14,76 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate syntex;
extern crate ethcore_ipc_codegen as codegen;
use std::env;
use std::path::Path;
use std::process::exit;
pub fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
// rpc pass
if {
let src = Path::new("nested.rs.in");
let dst = Path::new(&out_dir).join("nested_ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).is_ok()
}
// serialization pass
{
let src = Path::new(&out_dir).join("nested_ipc.rs");
let dst = Path::new(&out_dir).join("nested_cg.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
// rpc pass
if {
let src = Path::new("service.rs.in");
let dst = Path::new(&out_dir).join("service_ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).is_ok()
}
// serialization pass
{
let src = Path::new(&out_dir).join("service_ipc.rs");
let dst = Path::new(&out_dir).join("service_cg.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
// rpc pass
if {
let src = Path::new("with_attrs.rs.in");
let dst = Path::new(&out_dir).join("with_attrs_ipc.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).is_ok()
}
// serialization pass
{
let src = Path::new(&out_dir).join("with_attrs_ipc.rs");
let dst = Path::new(&out_dir).join("with_attrs_cg.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
registry.expand("", &src, &dst).unwrap();
}
// rpc pass
{
let src = Path::new("binary.rs.in");
let dst = Path::new(&out_dir).join("binary.rs");
let mut registry = syntex::Registry::new();
codegen::register(&mut registry);
if let Err(err_msg) = registry.expand("", &src, &dst) {
println!("error: {}", err_msg);
exit(1);
}
}
codegen::derive_ipc("nested.rs.in").unwrap();
codegen::derive_ipc("service.rs.in").unwrap();
codegen::derive_ipc("with_attrs.rs.in").unwrap();
codegen::derive_binary("binary.rs.in").unwrap();
}


@ -42,38 +42,6 @@ mod tests {
assert_eq!(10, *service.commits.read().unwrap());
}
#[test]
fn call_service_handshake() {
let mut socket = TestSocket::new_ready(vec![0, 0,
// part count = 3
3, 0, 0, 0, 0, 0, 0, 0,
// part sizes
5, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
64, 0, 0, 0, 0, 0, 0, 0,
// total payload length
70, 0, 0, 0, 0, 0, 0, 0,
// protocol version
b'1', b'.', b'0', b'.', b'0',
// api version
b'1', b'.', b'0', b'.', b'0',
// reserved
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]);
let service = Arc::new(Service::new());
let result = service.dispatch(&mut socket);
// single `true`
assert_eq!(vec![1], result);
}
#[test]
fn call_service_client() {
let mut socket = TestSocket::new();
@ -110,9 +78,9 @@ mod tests {
#[test]
fn query_default_version() {
let ver = Arc::<Service>::protocol_version();
let ver = Service::protocol_version();
assert_eq!(ver, Version::parse("1.0.0").unwrap());
let ver = Arc::<Service>::api_version();
let ver = Service::api_version();
assert_eq!(ver, Version::parse("1.0.0").unwrap());
}
@ -153,16 +121,11 @@ mod tests {
#[test]
fn can_invoke_generic_service() {
let mut socket = TestSocket::new();
socket.read_buffer = vec![
1, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
0,
];
socket.read_buffer = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let db_client = DBClient::<u64, _>::init(socket);
let result = db_client.write(vec![0u8; 100]);
let result = db_client.write(vec![1u8; 1]);
assert_eq!(vec![0, 16, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
db_client.socket().write().unwrap().write_buffer.clone());
assert!(result.is_ok());
}


@ -15,4 +15,4 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/nested_cg.rs"));
include!(concat!(env!("OUT_DIR"), "/nested.rs.in"));


@ -15,9 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::RwLock;
use std::ops::*;
use ipc::IpcConfig;
use ipc::BinaryConvertable;
use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::VecDeque;
@ -29,11 +27,11 @@ pub struct DB<L: Sized> {
}
pub trait DBWriter {
fn write(&self, data: Vec<u8>) -> Result<(), DBError>;
fn write(&self, data: Vec<u8>) -> Result<(), DBError>;
fn write_slice(&self, data: &[u8]) -> Result<(), DBError>;
}
impl IpcConfig<DBWriter> for ::std::sync::Arc<DBWriter> {}
impl IpcConfig for DBWriter {}
#[derive(Binary)]
pub enum DBError { Write, Read }
@ -58,4 +56,4 @@ trait DBNotify {
fn notify(&self, a: u64, b: u64) -> bool;
}
impl IpcConfig<DBNotify> for ::std::sync::Arc<DBNotify> { }
impl IpcConfig for DBNotify { }


@ -22,6 +22,7 @@ extern crate semver;
extern crate nanomsg;
extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_util as util;
#[macro_use] extern crate log;
pub mod service;
mod examples;


@ -15,4 +15,4 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/service_cg.rs"));
include!(concat!(env!("OUT_DIR"), "/service.rs.in"));


@ -15,7 +15,6 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::RwLock;
use std::ops::*;
use ipc::IpcConfig;
use std::mem;
use ipc::binary::BinaryConvertError;
@ -70,4 +69,4 @@ impl Service {
}
}
impl ::ipc::IpcConfig<Service> for ::std::sync::Arc<Service> {}
impl ::ipc::IpcConfig for Service {}


@ -15,4 +15,4 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/with_attrs_cg.rs"));
include!(concat!(env!("OUT_DIR"), "/with_attrs.rs.in"));


@ -14,8 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::RwLock;
use std::ops::*;
use ipc::IpcConfig;
use std::mem;
use ipc::binary::BinaryConvertError;
@ -31,4 +29,4 @@ impl BadlyNamedService {
}
}
impl ::ipc::IpcConfig<BadlyNamedService> for ::std::sync::Arc<BadlyNamedService> {}
impl IpcConfig for BadlyNamedService {}


@ -17,6 +17,7 @@
//! Spec account deserialization.
use uint::Uint;
use bytes::Bytes;
use spec::builtin::Builtin;
/// Spec account.
@ -28,6 +29,8 @@ pub struct Account {
pub balance: Option<Uint>,
/// Nonce.
pub nonce: Option<Uint>,
/// Code.
pub code: Option<Bytes>
}
impl Account {
@ -41,14 +44,22 @@ impl Account {
mod tests {
use serde_json;
use spec::account::Account;
use util::numbers::U256;
use uint::Uint;
use bytes::Bytes;
#[test]
fn account_deserialization() {
let s = r#"{
"balance": "1",
"builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } }
"nonce": "0",
"builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } },
"code": "1234"
}"#;
let _deserialized: Account = serde_json::from_str(s).unwrap();
// TODO: validate all fields
let deserialized: Account = serde_json::from_str(s).unwrap();
assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1)));
assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0)));
assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34]));
assert!(deserialized.builtin.is_some()); // Further tested in builtin.rs
}
}


@ -14,12 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethash params deserialization.
//! Authority params deserialization.
use uint::Uint;
use hash::Address;
/// Ethash params deserialization.
/// Authority params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct BasicAuthorityParams {
/// Gas limit divisor.
@ -32,7 +32,7 @@ pub struct BasicAuthorityParams {
pub authorities: Vec<Address>,
}
/// Ethash engine deserialization.
/// Authority engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct BasicAuthority {
/// Ethash params.


@ -45,7 +45,7 @@ pub struct Builtin {
#[cfg(test)]
mod tests {
use serde_json;
use spec::builtin::Builtin;
use spec::builtin::{Builtin, Pricing, Linear};
#[test]
fn builtin_deserialization() {
@ -53,7 +53,8 @@ mod tests {
"name": "ecrecover",
"pricing": { "linear": { "base": 3000, "word": 0 } }
}"#;
let _deserialized: Builtin = serde_json::from_str(s).unwrap();
// TODO: validate all fields
let deserialized: Builtin = serde_json::from_str(s).unwrap();
assert_eq!(deserialized.name, "ecrecover");
assert_eq!(deserialized.pricing, Pricing::Linear(Linear { base: 3000, word: 0 }));
}
}


@ -24,6 +24,8 @@ use spec::BasicAuthority;
pub enum Engine {
/// Null engine.
Null,
/// Instantly sealing engine.
InstantSeal,
/// Ethash engine.
Ethash(Ethash),
/// BasicAuthority engine.
@ -44,6 +46,14 @@ mod tests {
let deserialized: Engine = serde_json::from_str(s).unwrap();
assert_eq!(Engine::Null, deserialized);
let s = r#"{
"InstantSeal": null
}"#;
let deserialized: Engine = serde_json::from_str(s).unwrap();
assert_eq!(Engine::InstantSeal, deserialized);
let s = r#"{
"Ethash": {
"params": {


@ -26,7 +26,7 @@ extern crate time;
#[macro_use]
extern crate lazy_static;
use std::env;
use std::{env, thread};
use std::sync::Arc;
use std::fs::File;
use std::io::Write;
@ -91,7 +91,8 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
let with_color = if max_log_level() <= LogLevelFilter::Info {
format!("{}{}", Colour::Black.bold().paint(timestamp), record.args())
} else {
format!("{}{}:{}: {}", Colour::Black.bold().paint(timestamp), record.level(), record.target(), record.args())
let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x)));
format!("{}{} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args())
};
let removed_color = kill_color(with_color.as_ref());
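When verbose logging is enabled, each line now carries the emitting thread's name between the timestamp and the level, so the layout becomes `<timestamp><thread> <level> <target> <message>` instead of the previous `<timestamp><level>:<target>: <message>`.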


@ -20,6 +20,7 @@ const MIN_BC_CACHE_MB: u32 = 4;
const MIN_DB_CACHE_MB: u32 = 2;
const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
const DEFAULT_TRACE_CACHE_SIZE: u32 = 20;
/// Configuration for application cache sizes.
/// All values are represented in MB.
@ -34,6 +35,8 @@ pub struct CacheConfig {
blockchain: u32,
/// Size of transaction queue cache.
queue: u32,
/// Size of traces cache.
traces: u32,
}
impl Default for CacheConfig {
@ -49,6 +52,7 @@ impl CacheConfig {
db: total * 7 / 8,
blockchain: total / 8,
queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
traces: DEFAULT_TRACE_CACHE_SIZE,
}
}
@ -58,6 +62,7 @@ impl CacheConfig {
db: db,
blockchain: blockchain,
queue: queue,
traces: DEFAULT_TRACE_CACHE_SIZE,
}
}
@ -80,6 +85,11 @@ impl CacheConfig {
pub fn blockchain(&self) -> u32 {
max(self.blockchain, MIN_BC_CACHE_MB)
}
/// Size of the traces cache.
pub fn traces(&self) -> u32 {
self.traces
}
}
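As a worked example of the split above: a total cache of 64 MB gives db = 64 * 7 / 8 = 56 MB and blockchain = 64 / 8 = 8 MB, while the queue (50 MB) and the new traces cache (20 MB) keep their fixed defaults.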
#[cfg(test)]


@ -74,6 +74,9 @@ Account Options:
[default: 8180].
--signer-path PATH Specify directory where Signer UIs tokens should
be stored. [default: $HOME/.parity/signer]
--signer-no-validation Disable Origin and Host headers validation for
Trusted Signer. WARNING: INSECURE. Used only for
development.
Networking Options:
--no-network Disable p2p networking.
@ -212,7 +215,7 @@ Footprint Options:
the entire system, overrides other cache and queue
options.
--fast-and-loose Disables DB WAL, which gives a significant speed up
but means an unclean exit is unrecoverable.
--db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs;
hdd - suitable for slow HDDs [default: ssd].
@ -337,6 +340,7 @@ pub struct Args {
pub flag_no_signer: bool,
pub flag_signer_port: u16,
pub flag_signer_path: String,
pub flag_signer_no_validation: bool,
pub flag_force_sealing: bool,
pub flag_reseal_on_txs: String,
pub flag_reseal_min_period: u64,


@ -303,6 +303,7 @@ impl Configuration {
enabled: self.signer_enabled(),
port: self.args.flag_signer_port,
signer_path: self.directories().signer,
skip_origin_validation: self.args.flag_signer_no_validation,
}
}
@ -789,6 +790,19 @@ mod tests {
assert_eq!(conf0.signer_enabled(), false);
}
#[test]
fn should_parse_signer_allow_all_flag() {
// given
// when
let conf0 = parse(&["parity", "--signer-no-validation"]);
let conf1 = parse(&["parity"]);
// then
assert_eq!(conf0.args.flag_signer_no_validation, true);
assert_eq!(conf1.args.flag_signer_no_validation, false);
}
#[test]
fn should_not_bail_on_empty_line_in_reserved_peers() {
let temp = RandomTempPath::new();


@ -102,7 +102,7 @@ pub fn to_address(s: Option<String>) -> Result<Address, String> {
pub fn to_addresses(s: &Option<String>) -> Result<Vec<Address>, String> {
match *s {
Some(ref adds) if adds.is_empty() => adds.split(',')
Some(ref adds) if !adds.is_empty() => adds.split(',')
.map(|a| clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)))
.collect(),
_ => Ok(Vec::new()),
@ -212,6 +212,10 @@ pub fn to_client_config(
client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize);
// db queue cache size, in bytes
client_config.queue.max_mem_use = cache_config.queue() as usize * mb;
// in bytes
client_config.tracing.max_cache_size = cache_config.traces() as usize * mb;
// in bytes
client_config.tracing.pref_cache_size = cache_config.traces() as usize * 3 / 4 * mb;
client_config.mode = mode;
client_config.tracing.enabled = tracing;
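With the default traces figure of 20 MB this sets the trace cache ceiling to 20 * 1024 * 1024 bytes and the preferred size to three quarters of that (15 MB), matching the `pref_cache_size`/`max_cache_size` defaults added to the trace `Config` earlier in this diff.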
@ -295,7 +299,7 @@ mod tests {
use util::{U256};
use ethcore::client::{Mode, BlockID};
use ethcore::miner::PendingSet;
use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_price, geth_ipc_path, to_bootnodes};
use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes};
#[test]
fn test_to_duration() {
@ -366,6 +370,18 @@ mod tests {
assert_eq!(to_address(None).unwrap(), Default::default());
}
#[test]
fn test_to_addresses() {
let addresses = to_addresses(&Some("0xD9A111feda3f362f55Ef1744347CDC8Dd9964a41,D9A111feda3f362f55Ef1744347CDC8Dd9964a42".into())).unwrap();
assert_eq!(
addresses,
vec![
"D9A111feda3f362f55Ef1744347CDC8Dd9964a41".parse().unwrap(),
"D9A111feda3f362f55Ef1744347CDC8Dd9964a42".parse().unwrap(),
]
);
}
#[test]
#[cfg_attr(feature = "dev", allow(float_cmp))]
fn test_to_price() {


@ -241,7 +241,7 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
// Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted)
let _ = fs::remove_dir_all(db_path.clone());
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_HEADERS, Extract::Header, &compaction_profile));
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Header, &compaction_profile));
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Body, &compaction_profile));
try!(consolidate_database(legacy::extras_database_path(path), db_path.clone(), client::DB_COL_EXTRA, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::state_database_path(path), db_path.clone(), client::DB_COL_STATE, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::trace_database_path(path), db_path.clone(), client::DB_COL_TRACE, Extract::All, &compaction_profile));

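The one-line migration change above fixes a copy/paste slip: the bodies column was being consolidated with Extract::Header. A toy sketch (simplified record type, illustrative names) of why the extractor variant matters per column:

/// Which part of a legacy record to copy into the new column.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Extract {
    Header,
    Body,
    All,
}

/// Toy legacy record: a block stored as (header, body).
struct LegacyBlock {
    header: Vec<u8>,
    body: Vec<u8>,
}

fn extract(record: &LegacyBlock, what: Extract) -> Vec<u8> {
    match what {
        Extract::Header => record.header.clone(),
        Extract::Body => record.body.clone(),
        Extract::All => {
            let mut all = record.header.clone();
            all.extend_from_slice(&record.body);
            all
        }
    }
}

fn main() {
    let block = LegacyBlock { header: vec![1, 2], body: vec![3, 4, 5] };

    // Before the fix, both the headers and the bodies columns received the header bytes.
    assert_eq!(extract(&block, Extract::Header), vec![1, 2]);
    // After the fix, the bodies column gets the body payload.
    assert_eq!(extract(&block, Extract::Body), vec![3, 4, 5]);
    assert_eq!(extract(&block, Extract::All), vec![1, 2, 3, 4, 5]);
}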

@ -104,8 +104,8 @@ pub struct Dependencies {
pub external_miner: Arc<ExternalMiner>,
pub logger: Arc<RotatingLogger>,
pub settings: Arc<NetworkSettings>,
pub allow_pending_receipt_query: bool,
pub net_service: Arc<ManageNetwork>,
pub geth_compatibility: bool,
}
fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
@ -163,7 +163,10 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
&deps.secret_store,
&deps.miner,
&deps.external_miner,
deps.allow_pending_receipt_query
EthClientOptions {
allow_pending_receipt_query: !deps.geth_compatibility,
send_block_number_in_get_work: !deps.geth_compatibility,
}
);
server.add_delegate(client.to_delegate());


@ -211,8 +211,8 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
external_miner: external_miner.clone(),
logger: logger.clone(),
settings: Arc::new(cmd.net_settings.clone()),
allow_pending_receipt_query: !cmd.geth_compatibility,
net_service: manage_network.clone()
net_service: manage_network.clone(),
geth_compatibility: cmd.geth_compatibility,
});
let dependencies = rpc::Dependencies {
@ -311,7 +311,7 @@ fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result<A
for a in cfg.unlocked_accounts {
if passwords.iter().find(|p| account_service.unlock_account_permanently(a, (*p).clone()).is_ok()).is_none() {
return Err(format!("No password given to unlock account {}. Pass the password using `--password`.", a));
return Err(format!("No password found to unlock account {}. Make sure valid password is present in files passed using `--password`.", a));
}
}


@ -32,6 +32,7 @@ pub struct Configuration {
pub enabled: bool,
pub port: u16,
pub signer_path: String,
pub skip_origin_validation: bool,
}
impl Default for Configuration {
@ -40,6 +41,7 @@ impl Default for Configuration {
enabled: true,
port: 8180,
signer_path: replace_home("$HOME/.parity/signer"),
skip_origin_validation: false,
}
}
}
@ -89,6 +91,11 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, Str
deps.apis.signer_queue.clone(),
codes_path(conf.signer_path),
);
if conf.skip_origin_validation {
warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Trusted Signer with no origin validation."));
info!("If you do not intend this, exit now.");
}
let server = server.skip_origin_validation(conf.skip_origin_validation);
let server = rpc_apis::setup_rpc(server, deps.apis, rpc_apis::ApiSet::SafeContext);
server.start(addr)
};

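For context on what skip_origin_validation bypasses, a generic sketch of an Origin-header check of the kind a local signer UI server performs; the allowed-origin value and the accept-when-absent policy are assumptions of this sketch, not taken from the codebase:

/// Returns true when a request with the given `Origin` header may be served.
fn origin_allowed(origin: Option<&str>, allowed: &str, skip_origin_validation: bool) -> bool {
    if skip_origin_validation {
        // Mirrors the INSECURE mode enabled by --signer-no-validation above.
        return true;
    }
    match origin {
        Some(o) => o == allowed,
        // Assumed policy for this sketch: non-browser clients send no Origin header.
        None => true,
    }
}

fn main() {
    // Assumed allowed origin, derived from the default signer port above.
    let allowed = "http://127.0.0.1:8180";

    assert!(origin_allowed(Some(allowed), allowed, false));
    assert!(!origin_allowed(Some("http://evil.example"), allowed, false));

    // With validation skipped, any origin is accepted.
    assert!(origin_allowed(Some("http://evil.example"), allowed, true));
}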

@ -21,5 +21,5 @@ mod signing_queue;
pub use self::poll_manager::PollManager;
pub use self::poll_filter::PollFilter;
pub use self::requests::{TransactionRequest, TransactionConfirmation, CallRequest};
pub use self::requests::{TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest};
pub use self::signing_queue::{ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent};


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::{Address, U256};
use util::{Address, U256, Bytes, H256};
/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
@ -30,18 +30,42 @@ pub struct TransactionRequest {
/// Value of transaction in wei
pub value: Option<U256>,
/// Additional data sent with transaction
pub data: Option<Vec<u8>>,
pub data: Option<Bytes>,
/// Transaction's nonce
pub nonce: Option<U256>,
}
/// Transaction confirmation waiting in a queue
/// Transaction request coming from RPC with default values filled in.
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
pub struct TransactionConfirmation {
/// Id of this confirmation
pub id: U256,
/// TransactionRequest
pub transaction: TransactionRequest,
pub struct FilledTransactionRequest {
/// Sender
pub from: Address,
/// Recipient
pub to: Option<Address>,
/// Gas Price
pub gas_price: U256,
/// Gas
pub gas: U256,
/// Value of transaction in wei
pub value: U256,
/// Additional data sent with transaction
pub data: Bytes,
/// Transaction's nonce
pub nonce: Option<U256>,
}
impl From<FilledTransactionRequest> for TransactionRequest {
fn from(r: FilledTransactionRequest) -> Self {
TransactionRequest {
from: r.from,
to: r.to,
gas_price: Some(r.gas_price),
gas: Some(r.gas),
value: Some(r.value),
data: Some(r.data),
nonce: r.nonce,
}
}
}
/// Call request
@ -62,3 +86,21 @@ pub struct CallRequest {
/// Nonce
pub nonce: Option<U256>,
}
/// Confirmation object
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ConfirmationRequest {
/// Id of this confirmation
pub id: U256,
/// Payload to confirm
pub payload: ConfirmationPayload,
}
/// Payload to confirm in Trusted Signer
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum ConfirmationPayload {
/// Transaction
Transaction(FilledTransactionRequest),
/// Sign request
Sign(Address, H256),
}

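A compact sketch of how the new request types compose, with u64, String and a fixed byte array standing in for U256, Address, Bytes and H256 (the real structs above carry more fields):

// Simplified stand-ins: u64 for U256, String for Address, Vec<u8> for Bytes, [u8; 32] for H256.
#[derive(Debug, Clone, PartialEq)]
struct TransactionRequest {
    from: String,
    value: Option<u64>,
    data: Option<Vec<u8>>,
}

#[derive(Debug, Clone, PartialEq)]
struct FilledTransactionRequest {
    from: String,
    value: u64,
    data: Vec<u8>,
}

impl From<FilledTransactionRequest> for TransactionRequest {
    fn from(r: FilledTransactionRequest) -> Self {
        TransactionRequest {
            from: r.from,
            value: Some(r.value),
            data: Some(r.data),
        }
    }
}

/// The queue no longer holds bare transactions: every entry is a payload.
#[derive(Debug, Clone, PartialEq)]
enum ConfirmationPayload {
    Transaction(FilledTransactionRequest),
    Sign(String, [u8; 32]),
}

#[derive(Debug, Clone, PartialEq)]
struct ConfirmationRequest {
    id: u64,
    payload: ConfirmationPayload,
}

fn main() {
    let filled = FilledTransactionRequest {
        from: "0x01".into(),
        value: 10_000_000,
        data: vec![],
    };

    // Transactions and sign requests now share one confirmation queue entry type.
    let requests = vec![
        ConfirmationRequest { id: 1, payload: ConfirmationPayload::Transaction(filled.clone()) },
        ConfirmationRequest { id: 2, payload: ConfirmationPayload::Sign("0x01".into(), [5u8; 32]) },
    ];
    assert_eq!(requests.len(), 2);

    // A filled request converts back to the optional-field RPC form.
    let rpc: TransactionRequest = filled.into();
    assert_eq!(rpc.value, Some(10_000_000));
    assert_eq!(rpc.data, Some(vec![]));
}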

@ -17,10 +17,10 @@
use std::thread;
use std::time::{Instant, Duration};
use std::sync::{mpsc, Arc};
use std::collections::HashMap;
use std::collections::BTreeMap;
use jsonrpc_core;
use util::{Mutex, RwLock, U256};
use v1::helpers::{TransactionRequest, TransactionConfirmation};
use v1::helpers::{ConfirmationRequest, ConfirmationPayload};
/// Result that can be returned from JSON RPC.
pub type RpcResult = Result<jsonrpc_core::Value, jsonrpc_core::Error>;
@ -54,41 +54,41 @@ pub type QueueEventReceiver = mpsc::Receiver<QueueEvent>;
pub trait SigningQueue: Send + Sync {
/// Add new request to the queue.
/// Returns a `ConfirmationPromise` that can be used to await for resolution of given request.
fn add_request(&self, transaction: TransactionRequest) -> ConfirmationPromise;
fn add_request(&self, request: ConfirmationPayload) -> ConfirmationPromise;
/// Removes a request from the queue.
/// Notifies possible token holders that transaction was rejected.
fn request_rejected(&self, id: U256) -> Option<TransactionConfirmation>;
/// Notifies possible token holders that request was rejected.
fn request_rejected(&self, id: U256) -> Option<ConfirmationRequest>;
/// Removes a request from the queue.
/// Notifies possible token holders that transaction was confirmed and given hash was assigned.
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<TransactionConfirmation>;
/// Notifies possible token holders that request was confirmed and given hash was assigned.
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest>;
/// Returns a request if it is contained in the queue.
fn peek(&self, id: &U256) -> Option<TransactionConfirmation>;
fn peek(&self, id: &U256) -> Option<ConfirmationRequest>;
/// Return copy of all the requests in the queue.
fn requests(&self) -> Vec<TransactionConfirmation>;
fn requests(&self) -> Vec<ConfirmationRequest>;
/// Returns number of transactions awaiting confirmation.
/// Returns number of requests awaiting confirmation.
fn len(&self) -> usize;
/// Returns true if there are no transactions awaiting confirmation.
/// Returns true if there are no requests awaiting confirmation.
fn is_empty(&self) -> bool;
}
#[derive(Debug, Clone, PartialEq)]
/// Result of a pending transaction.
/// Result of a pending confirmation request.
pub enum ConfirmationResult {
/// The transaction has not yet been confirmed nor rejected.
/// The request has not yet been confirmed nor rejected.
Waiting,
/// The transaction has been rejected.
/// The request has been rejected.
Rejected,
/// The transaction has been confirmed.
/// The request has been confirmed.
Confirmed(RpcResult),
}
/// Time you need to confirm the transaction in UI.
/// Time you need to confirm the request in UI.
/// This is the amount of time token holder will wait before
/// returning `None`.
/// Unless we have a multi-threaded RPC this will lock
@ -100,12 +100,14 @@ const QUEUE_TIMEOUT_DURATION_SEC : u64 = 20;
pub struct ConfirmationToken {
result: Arc<Mutex<ConfirmationResult>>,
handle: thread::Thread,
request: TransactionConfirmation,
request: ConfirmationRequest,
timeout: Duration,
}
pub struct ConfirmationPromise {
id: U256,
result: Arc<Mutex<ConfirmationResult>>,
timeout: Duration,
}
impl ConfirmationToken {
@ -121,6 +123,7 @@ impl ConfirmationToken {
ConfirmationPromise {
id: self.request.id,
result: self.result.clone(),
timeout: self.timeout,
}
}
}
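Each promise now carries its own timeout, copied from the queue so tests can shorten it via with_timeout. A stand-in sketch of the wait-with-timeout idea, built on a Condvar rather than the queue's thread-parking loop; all names here are illustrative, not the project's:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

/// Illustrative stand-in for `ConfirmationPromise`: a shared result slot plus a
/// per-promise timeout, resolved by whichever thread confirms the request.
struct Promise<T> {
    slot: Arc<(Mutex<Option<T>>, Condvar)>,
    timeout: Duration,
}

impl<T: Clone> Promise<T> {
    /// Blocks until the slot is filled or the timeout elapses; `None` on timeout.
    fn wait_with_timeout(&self) -> Option<T> {
        let (lock, cvar) = &*self.slot;
        let guard = lock.lock().unwrap();
        let (guard, _timed_out) = cvar
            .wait_timeout_while(guard, self.timeout, |slot| slot.is_none())
            .unwrap();
        (*guard).clone()
    }
}

fn main() {
    let slot = Arc::new((Mutex::new(None), Condvar::new()));
    let promise = Promise { slot: slot.clone(), timeout: Duration::from_secs(1) };

    // A "signer" thread confirms the request shortly after it is queued.
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50));
        let (lock, cvar) = &*slot;
        *lock.lock().unwrap() = Some("0x01".to_string());
        cvar.notify_all();
    });

    assert_eq!(promise.wait_with_timeout(), Some("0x01".to_string()));
}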
@ -134,8 +137,7 @@ impl ConfirmationPromise {
/// Returns `None` if transaction was rejected or timeout reached.
/// Returns `Some(result)` if transaction was confirmed.
pub fn wait_with_timeout(&self) -> Option<RpcResult> {
let timeout = Duration::from_secs(QUEUE_TIMEOUT_DURATION_SEC);
let res = self.wait_until(Instant::now() + timeout);
let res = self.wait_until(Instant::now() + self.timeout);
match res {
ConfirmationResult::Confirmed(h) => Some(h),
ConfirmationResult::Rejected | ConfirmationResult::Waiting => None,
@ -146,16 +148,16 @@ impl ConfirmationPromise {
pub fn result(&self) -> ConfirmationResult { self.wait_until(Instant::now()) }
/// Blocks current thread and awaits for
/// resolution of the transaction (rejected / confirmed)
/// Returns `None` if transaction was rejected or timeout reached.
/// Returns `Some(result)` if transaction was confirmed.
/// resolution of the request (rejected / confirmed)
/// Returns `None` if request was rejected or timeout reached.
/// Returns `Some(result)` if request was confirmed.
pub fn wait_until(&self, deadline: Instant) -> ConfirmationResult {
trace!(target: "own_tx", "Signer: Awaiting transaction confirmation... ({:?}).", self.id);
trace!(target: "own_tx", "Signer: Awaiting confirmation... ({:?}).", self.id);
loop {
let now = Instant::now();
// Check the result...
match *self.result.lock() {
// Waiting and deadline not yet passed continue looping.
// Waiting and deadline not yet passed - continue looping.
ConfirmationResult::Waiting if now < deadline => {}
// Anything else - return.
ref a => return a.clone(),
@ -166,12 +168,13 @@ impl ConfirmationPromise {
}
}
/// Queue for all unconfirmed transactions.
/// Queue for all unconfirmed requests.
pub struct ConfirmationsQueue {
id: Mutex<U256>,
queue: RwLock<HashMap<U256, ConfirmationToken>>,
queue: RwLock<BTreeMap<U256, ConfirmationToken>>,
sender: Mutex<mpsc::Sender<QueueEvent>>,
receiver: Mutex<Option<mpsc::Receiver<QueueEvent>>>,
timeout: Duration,
}
impl Default for ConfirmationsQueue {
@ -180,14 +183,23 @@ impl Default for ConfirmationsQueue {
ConfirmationsQueue {
id: Mutex::new(U256::from(0)),
queue: RwLock::new(HashMap::new()),
queue: RwLock::new(BTreeMap::new()),
sender: Mutex::new(send),
receiver: Mutex::new(Some(recv)),
timeout: Duration::from_secs(QUEUE_TIMEOUT_DURATION_SEC),
}
}
}
impl ConfirmationsQueue {
#[cfg(test)]
/// Creates new confirmations queue with specified timeout
pub fn with_timeout(timeout: Duration) -> Self {
let mut queue = Self::default();
queue.timeout = timeout;
queue
}
/// Blocks the thread and starts listening for notifications regarding all actions in the queue.
/// For each event, `listener` callback will be invoked.
/// This method can be used only once (only single consumer of events can exist).
@ -221,9 +233,9 @@ impl ConfirmationsQueue {
let _ = self.sender.lock().send(message);
}
/// Removes transaction from this queue and notifies `ConfirmationPromise` holders about the result.
/// Removes requests from this queue and notifies `ConfirmationPromise` holders about the result.
/// Notifies also a receiver about that event.
fn remove(&self, id: U256, result: Option<RpcResult>) -> Option<TransactionConfirmation> {
fn remove(&self, id: U256, result: Option<RpcResult>) -> Option<ConfirmationRequest> {
let token = self.queue.write().remove(&id);
if let Some(token) = token {
@ -248,7 +260,7 @@ impl Drop for ConfirmationsQueue {
}
impl SigningQueue for ConfirmationsQueue {
fn add_request(&self, transaction: TransactionRequest) -> ConfirmationPromise {
fn add_request(&self, request: ConfirmationPayload) -> ConfirmationPromise {
// Increment id
let id = {
let mut last_id = self.id.lock();
@ -257,16 +269,19 @@ impl SigningQueue for ConfirmationsQueue {
};
// Add request to queue
let res = {
debug!(target: "own_tx", "Signer: New entry ({:?}) in confirmation queue.", id);
trace!(target: "own_tx", "Signer: ({:?}) : {:?}", id, request);
let mut queue = self.queue.write();
queue.insert(id, ConfirmationToken {
result: Arc::new(Mutex::new(ConfirmationResult::Waiting)),
handle: thread::current(),
request: TransactionConfirmation {
request: ConfirmationRequest {
id: id,
transaction: transaction,
payload: request,
},
timeout: self.timeout,
});
debug!(target: "own_tx", "Signer: New transaction ({:?}) in confirmation queue.", id);
queue.get(&id).map(|token| token.as_promise()).expect("Token was just inserted.")
};
// Notify listeners
@ -275,21 +290,21 @@ impl SigningQueue for ConfirmationsQueue {
}
fn peek(&self, id: &U256) -> Option<TransactionConfirmation> {
fn peek(&self, id: &U256) -> Option<ConfirmationRequest> {
self.queue.read().get(id).map(|token| token.request.clone())
}
fn request_rejected(&self, id: U256) -> Option<TransactionConfirmation> {
debug!(target: "own_tx", "Signer: Transaction rejected ({:?}).", id);
fn request_rejected(&self, id: U256) -> Option<ConfirmationRequest> {
debug!(target: "own_tx", "Signer: Request rejected ({:?}).", id);
self.remove(id, None)
}
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<TransactionConfirmation> {
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest> {
debug!(target: "own_tx", "Signer: Transaction confirmed ({:?}).", id);
self.remove(id, Some(result))
}
fn requests(&self) -> Vec<TransactionConfirmation> {
fn requests(&self) -> Vec<ConfirmationRequest> {
let queue = self.queue.read();
queue.values().map(|token| token.request.clone()).collect()
}
@ -312,20 +327,20 @@ mod test {
use std::thread;
use std::sync::Arc;
use util::{Address, U256, H256, Mutex};
use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, TransactionRequest};
use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload};
use v1::types::H256 as NH256;
use jsonrpc_core::to_value;
fn request() -> TransactionRequest {
TransactionRequest {
fn request() -> ConfirmationPayload {
ConfirmationPayload::Transaction(FilledTransactionRequest {
from: Address::from(1),
to: Some(Address::from(2)),
gas_price: None,
gas: None,
value: Some(U256::from(10_000_000)),
data: None,
gas_price: 0.into(),
gas: 10_000.into(),
value: 10_000_000.into(),
data: vec![],
nonce: None,
}
})
}
#[test]
@ -391,6 +406,6 @@ mod test {
assert_eq!(all.len(), 1);
let el = all.get(0).unwrap();
assert_eq!(el.id, U256::from(1));
assert_eq!(el.transaction, request);
assert_eq!(el.payload, request);
}
}

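Switching the queue's storage from HashMap to BTreeMap above means requests() returns confirmations in ascending id order. A minimal illustration of that ordering guarantee:

use std::collections::{BTreeMap, HashMap};

fn main() {
    let mut by_id = BTreeMap::new();
    by_id.insert(3u64, "third");
    by_id.insert(1u64, "first");
    by_id.insert(2u64, "second");

    // BTreeMap iterates in key order, so listing pending requests is deterministic.
    let ordered: Vec<u64> = by_id.keys().cloned().collect();
    assert_eq!(ordered, vec![1, 2, 3]);

    // A HashMap gives no such guarantee; iteration order is arbitrary.
    let unordered: HashMap<u64, &str> = by_id.iter().map(|(k, v)| (*k, *v)).collect();
    assert_eq!(unordered.len(), 3);
}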

@ -47,6 +47,23 @@ use v1::helpers::CallRequest as CRequest;
use v1::impls::{default_gas_price, dispatch_transaction, error_codes};
use serde;
/// Eth RPC options
pub struct EthClientOptions {
/// Returns receipt from pending blocks
pub allow_pending_receipt_query: bool,
/// Send additional block number when asking for work
pub send_block_number_in_get_work: bool,
}
impl Default for EthClientOptions {
fn default() -> Self {
EthClientOptions {
allow_pending_receipt_query: true,
send_block_number_in_get_work: true,
}
}
}
/// Eth rpc implementation.
pub struct EthClient<C, S: ?Sized, M, EM> where
C: MiningBlockChainClient,
@ -60,7 +77,7 @@ pub struct EthClient<C, S: ?Sized, M, EM> where
miner: Weak<M>,
external_miner: Arc<EM>,
seed_compute: Mutex<SeedHashCompute>,
allow_pending_receipt_query: bool,
options: EthClientOptions,
}
impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
@ -70,7 +87,7 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
EM: ExternalMinerService {
/// Creates new EthClient.
pub fn new(client: &Arc<C>, sync: &Arc<S>, accounts: &Arc<AccountProvider>, miner: &Arc<M>, em: &Arc<EM>, allow_pending_receipt_query: bool)
pub fn new(client: &Arc<C>, sync: &Arc<S>, accounts: &Arc<AccountProvider>, miner: &Arc<M>, em: &Arc<EM>, options: EthClientOptions)
-> EthClient<C, S, M, EM> {
EthClient {
client: Arc::downgrade(client),
@ -79,7 +96,7 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
accounts: Arc::downgrade(accounts),
external_miner: em.clone(),
seed_compute: Mutex::new(SeedHashCompute::new()),
allow_pending_receipt_query: allow_pending_receipt_query,
options: options,
}
}
@ -316,7 +333,7 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
fn is_mining(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
match params {
Params::None => to_value(&self.external_miner.is_mining()),
Params::None => to_value(&(take_weak!(self.miner).is_sealing())),
_ => Err(Error::invalid_params())
}
}
@ -340,10 +357,16 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
}
}
fn accounts(&self, _: Params) -> Result<Value, Error> {
fn accounts(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
let store = take_weak!(self.accounts);
to_value(&store.accounts().into_iter().map(Into::into).collect::<Vec<RpcH160>>())
match params {
Params::None => {
let store = take_weak!(self.accounts);
let accounts = try!(store.accounts().map_err(|_| Error::internal_error()));
to_value(&accounts.into_iter().map(Into::into).collect::<Vec<RpcH160>>())
},
_ => Err(Error::invalid_params())
}
}
fn block_number(&self, params: Params) -> Result<Value, Error> {
@ -375,7 +398,7 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
match block_number {
BlockNumber::Pending => to_value(&RpcU256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)))),
id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) {
Some(s) => to_value(&RpcU256::from(s)),
Some(s) => to_value(&RpcH256::from(s)),
None => Err(make_unsupported_err()), // None is only returned on unsupported requests.
}
}
@ -490,7 +513,7 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
let miner = take_weak!(self.miner);
let hash: H256 = hash.into();
match miner.pending_receipts().get(&hash) {
Some(receipt) if self.allow_pending_receipt_query => to_value(&Receipt::from(receipt.clone())),
Some(receipt) if self.options.allow_pending_receipt_query => to_value(&Receipt::from(receipt.clone())),
_ => {
let client = take_weak!(self.client);
let receipt = client.transaction_receipt(TransactionID::Hash(hash));
@ -576,8 +599,13 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
let pow_hash = b.hash();
let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
let seed_hash = self.seed_compute.lock().get_seedhash(b.block().header().number());
let block_number = RpcU256::from(b.block().header().number());
to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number))
if self.options.send_block_number_in_get_work {
let block_number = RpcU256::from(b.block().header().number());
to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number))
} else {
to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target)))
}
}).unwrap_or(Err(Error::internal_error())) // no work found.
},
_ => Err(Error::invalid_params())

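The new EthClientOptions bundle replaces the lone allow_pending_receipt_query flag, and rpc_apis.rs derives both fields from --geth compatibility. A standalone sketch of that mapping (the struct is duplicated here only for illustration; options_for is a hypothetical helper):

/// Standalone copy of the options bundle introduced above, for illustration only.
#[derive(Debug, Clone, PartialEq)]
struct EthClientOptions {
    allow_pending_receipt_query: bool,
    send_block_number_in_get_work: bool,
}

impl Default for EthClientOptions {
    fn default() -> Self {
        EthClientOptions {
            allow_pending_receipt_query: true,
            send_block_number_in_get_work: true,
        }
    }
}

fn options_for(geth_compatibility: bool) -> EthClientOptions {
    // Matching rpc_apis.rs above: --geth compatibility switches both extensions off.
    EthClientOptions {
        allow_pending_receipt_query: !geth_compatibility,
        send_block_number_in_get_work: !geth_compatibility,
    }
}

fn main() {
    assert_eq!(options_for(false), EthClientOptions::default());

    let geth = options_for(true);
    assert!(!geth.allow_pending_receipt_query);
    assert!(!geth.send_block_number_in_get_work);
}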

@ -23,24 +23,21 @@ use ethcore::client::MiningBlockChainClient;
use util::{U256, Address, H256, Mutex};
use transient_hashmap::TransientHashMap;
use ethcore::account_provider::AccountProvider;
use v1::helpers::{SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationsQueue, TransactionRequest as TRequest};
use v1::helpers::{SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationsQueue, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest};
use v1::traits::EthSigning;
use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256};
use v1::impls::{default_gas_price, sign_and_dispatch, transaction_rejected_error};
use v1::impls::{default_gas_price, sign_and_dispatch, transaction_rejected_error, signer_disabled_error};
fn fill_optional_fields<C, M>(request: &mut TRequest, client: &C, miner: &M)
fn fill_optional_fields<C, M>(request: TRequest, client: &C, miner: &M) -> FilledRequest
where C: MiningBlockChainClient, M: MinerService {
if request.value.is_none() {
request.value = Some(U256::from(0));
}
if request.gas.is_none() {
request.gas = Some(miner.sensible_gas_limit());
}
if request.gas_price.is_none() {
request.gas_price = Some(default_gas_price(client, miner));
}
if request.data.is_none() {
request.data = Some(Vec::new());
FilledRequest {
from: request.from,
to: request.to,
nonce: request.nonce,
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(client, miner)),
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
value: request.value.unwrap_or_else(|| 0.into()),
data: request.data.unwrap_or_else(Vec::new),
}
}
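The reworked fill_optional_fields consumes the sparse request and returns a fully populated one instead of mutating in place. A simplified sketch of the same defaulting pattern, with plain integers standing in for U256 and hypothetical default functions:

struct Request {
    gas: Option<u64>,
    gas_price: Option<u64>,
    value: Option<u64>,
    data: Option<Vec<u8>>,
}

struct FilledRequest {
    gas: u64,
    gas_price: u64,
    value: u64,
    data: Vec<u8>,
}

// Illustrative defaults; the real code asks the miner and client for these.
fn sensible_gas_limit() -> u64 { 21_000 }
fn default_gas_price() -> u64 { 20_000_000_000 }

/// Consumes the sparse request and fills every missing field with a default,
/// so downstream code no longer has to handle `Option`s.
fn fill_optional_fields(request: Request) -> FilledRequest {
    FilledRequest {
        gas: request.gas.unwrap_or_else(sensible_gas_limit),
        gas_price: request.gas_price.unwrap_or_else(default_gas_price),
        value: request.value.unwrap_or(0),
        data: request.data.unwrap_or_else(Vec::new),
    }
}

fn main() {
    let filled = fill_optional_fields(Request {
        gas: None,
        gas_price: Some(1),
        value: None,
        data: None,
    });
    assert_eq!(filled.gas, 21_000);
    assert_eq!(filled.gas_price, 1);
    assert_eq!(filled.value, 0);
    assert!(filled.data.is_empty());
}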
@ -74,10 +71,26 @@ impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: Miner
Ok(())
}
fn dispatch<F: FnOnce(ConfirmationPromise) -> Result<Value, Error>>(&self, params: Params, f: F) -> Result<Value, Error> {
fn dispatch_sign<F: FnOnce(ConfirmationPromise) -> Result<Value, Error>>(&self, params: Params, f: F) -> Result<Value, Error> {
from_params::<(RpcH160, RpcH256)>(params).and_then(|(address, msg)| {
let address: Address = address.into();
let msg: H256 = msg.into();
let accounts = take_weak!(self.accounts);
if accounts.is_unlocked(address) {
return to_value(&accounts.sign(address, msg).ok().map_or_else(RpcH520::default, Into::into));
}
let queue = take_weak!(self.queue);
let promise = queue.add_request(ConfirmationPayload::Sign(address, msg));
f(promise)
})
}
fn dispatch_transaction<F: FnOnce(ConfirmationPromise) -> Result<Value, Error>>(&self, params: Params, f: F) -> Result<Value, Error> {
from_params::<(TransactionRequest, )>(params)
.and_then(|(request, )| {
let mut request: TRequest = request.into();
let request: TRequest = request.into();
let accounts = take_weak!(self.accounts);
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
@ -87,8 +100,8 @@ impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: Miner
}
let queue = take_weak!(self.queue);
fill_optional_fields(&mut request, &*client, &*miner);
let promise = queue.add_request(request);
let request = fill_optional_fields(request, &*client, &*miner);
let promise = queue.add_request(ConfirmationPayload::Transaction(request));
f(promise)
})
}
@ -98,23 +111,32 @@ impl<C, M> EthSigning for EthSigningQueueClient<C, M>
where C: MiningBlockChainClient + 'static, M: MinerService + 'static
{
fn sign(&self, _params: Params) -> Result<Value, Error> {
fn sign(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
warn!("Invoking eth_sign is not yet supported with signer enabled.");
// TODO [ToDr] Implement sign when rest of the signing queue is ready.
rpc_unimplemented!()
self.dispatch_sign(params, |promise| {
promise.wait_with_timeout().unwrap_or_else(|| to_value(&RpcH520::default()))
})
}
fn post_sign(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
self.dispatch_sign(params, |promise| {
let id = promise.id();
self.pending.lock().insert(id, promise);
to_value(&RpcU256::from(id))
})
}
fn send_transaction(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
self.dispatch(params, |promise| {
self.dispatch_transaction(params, |promise| {
promise.wait_with_timeout().unwrap_or_else(|| to_value(&RpcH256::default()))
})
}
fn post_transaction(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
self.dispatch(params, |promise| {
self.dispatch_transaction(params, |promise| {
let id = promise.id();
self.pending.lock().insert(id, promise);
to_value(&RpcU256::from(id))
@ -193,13 +215,18 @@ impl<C, M> EthSigning for EthSigningUnsafeClient<C, M> where
})
}
fn post_sign(&self, _: Params) -> Result<Value, Error> {
// We don't support this in non-signer mode.
Err(signer_disabled_error())
}
fn post_transaction(&self, _: Params) -> Result<Value, Error> {
// We don't support this in non-signer mode.
Err(Error::invalid_params())
Err(signer_disabled_error())
}
fn check_transaction(&self, _: Params) -> Result<Value, Error> {
// We don't support this in non-signer mode.
Err(Error::invalid_params())
Err(signer_disabled_error())
}
}


@ -27,7 +27,7 @@ use ethcore::miner::MinerService;
use v1::traits::Ethcore;
use v1::types::{Bytes, U256};
use v1::helpers::{SigningQueue, ConfirmationsQueue};
use v1::impls::error_codes;
use v1::impls::signer_disabled_error;
/// Ethcore implementation.
pub struct EthcoreClient<C, M> where
@ -152,11 +152,7 @@ impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: M
fn unsigned_transactions_count(&self, _params: Params) -> Result<Value, Error> {
try!(self.active());
match self.confirmations_queue {
None => Err(Error {
code: ErrorCode::ServerError(error_codes::SIGNER_DISABLED),
message: "Trusted Signer is disabled. This API is not available.".into(),
data: None
}),
None => Err(signer_disabled_error()),
Some(ref queue) => to_value(&queue.len()),
}
}


@ -42,7 +42,7 @@ mod traces;
mod rpc;
pub use self::web3::Web3Client;
pub use self::eth::EthClient;
pub use self::eth::{EthClient, EthClientOptions};
pub use self::eth_filter::EthFilterClient;
pub use self::eth_signing::{EthSigningUnsafeClient, EthSigningQueueClient};
pub use self::net::NetClient;
@ -54,7 +54,7 @@ pub use self::traces::TracesClient;
pub use self::rpc::RpcClient;
use v1::helpers::TransactionRequest;
use v1::types::H256 as NH256;
use v1::types::{H256 as RpcH256, H520 as RpcH520};
use ethcore::error::Error as EthcoreError;
use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
@ -80,7 +80,7 @@ mod error_codes {
fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<Value, Error>
where C: MiningBlockChainClient, M: MinerService {
let hash = NH256::from(signed_transaction.hash());
let hash = RpcH256::from(signed_transaction.hash());
let import = miner.import_own_transaction(client, signed_transaction);
@ -89,6 +89,12 @@ fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedT
.and_then(|_| to_value(&hash))
}
fn signature_with_password(accounts: &AccountProvider, address: Address, hash: H256, pass: String) -> Result<Value, Error> {
accounts.sign_with_password(address, pass, hash)
.map_err(password_error)
.and_then(|hash| to_value(&RpcH520::from(hash)))
}
fn prepare_transaction<C, M>(client: &C, miner: &M, request: TransactionRequest) -> Transaction where C: MiningBlockChainClient, M: MinerService {
Transaction {
nonce: request.nonce
@ -105,9 +111,10 @@ fn prepare_transaction<C, M>(client: &C, miner: &M, request: TransactionRequest)
}
}
fn unlock_sign_and_dispatch<C, M>(client: &C, miner: &M, request: TransactionRequest, account_provider: &AccountProvider, address: Address, password: String) -> Result<Value, Error>
fn unlock_sign_and_dispatch<C, M>(client: &C, miner: &M, request: TransactionRequest, account_provider: &AccountProvider, password: String) -> Result<Value, Error>
where C: MiningBlockChainClient, M: MinerService {
let address = request.from;
let signed_transaction = {
let t = prepare_transaction(client, miner, request);
let hash = t.hash();
@ -140,6 +147,14 @@ fn default_gas_price<C, M>(client: &C, miner: &M) -> U256 where C: MiningBlockCh
.unwrap_or_else(|_| miner.sensible_gas_price())
}
fn signer_disabled_error() -> Error {
Error {
code: ErrorCode::ServerError(error_codes::SIGNER_DISABLED),
message: "Trusted Signer is disabled. This API is not available.".into(),
data: None
}
}
fn signing_error(error: AccountError) -> Error {
Error {
code: ErrorCode::ServerError(error_codes::ACCOUNT_LOCKED),


@ -62,10 +62,16 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
.unwrap_or_else(|| to_value(&false))
}
fn accounts(&self, _: Params) -> Result<Value, Error> {
fn accounts(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
let store = take_weak!(self.accounts);
to_value(&store.accounts().into_iter().map(Into::into).collect::<Vec<RpcH160>>())
match params {
Params::None => {
let store = take_weak!(self.accounts);
let accounts = try!(store.accounts().map_err(|_| Error::internal_error()));
to_value(&accounts.into_iter().map(Into::into).collect::<Vec<RpcH160>>())
},
_ => Err(Error::invalid_params())
}
}
fn new_account(&self, params: Params) -> Result<Value, Error> {
@ -99,10 +105,9 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
from_params::<(TransactionRequest, String)>(params)
.and_then(|(request, password)| {
let request: TRequest = request.into();
let sender = request.from;
let accounts = take_weak!(self.accounts);
unlock_sign_and_dispatch(&*take_weak!(self.client), &*take_weak!(self.miner), request, &*accounts, sender, password)
unlock_sign_and_dispatch(&*take_weak!(self.client), &*take_weak!(self.miner), request, &*accounts, password)
})
}


@ -22,9 +22,9 @@ use ethcore::account_provider::AccountProvider;
use ethcore::client::MiningBlockChainClient;
use ethcore::miner::MinerService;
use v1::traits::PersonalSigner;
use v1::types::{TransactionModification, TransactionConfirmation, U256};
use v1::impls::unlock_sign_and_dispatch;
use v1::helpers::{SigningQueue, ConfirmationsQueue};
use v1::types::{TransactionModification, ConfirmationRequest, U256};
use v1::impls::{unlock_sign_and_dispatch, signature_with_password};
use v1::helpers::{SigningQueue, ConfirmationsQueue, ConfirmationPayload};
/// Transactions confirmation (personal) rpc implementation.
pub struct SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
@ -55,14 +55,16 @@ impl<C: 'static, M: 'static> SignerClient<C, M> where C: MiningBlockChainClient,
impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
fn transactions_to_confirm(&self, _params: Params) -> Result<Value, Error> {
fn requests_to_confirm(&self, _params: Params) -> Result<Value, Error> {
try!(self.active());
let queue = take_weak!(self.queue);
to_value(&queue.requests().into_iter().map(From::from).collect::<Vec<TransactionConfirmation>>())
to_value(&queue.requests().into_iter().map(From::from).collect::<Vec<ConfirmationRequest>>())
}
fn confirm_transaction(&self, params: Params) -> Result<Value, Error> {
fn confirm_request(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
// TODO [ToDr] TransactionModification is redundant for some calls
// might be better to replace it in future
from_params::<(U256, TransactionModification, String)>(params).and_then(
|(id, modification, pass)| {
let id = id.into();
@ -70,17 +72,23 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
let queue = take_weak!(self.queue);
let client = take_weak!(self.client);
let miner = take_weak!(self.miner);
queue.peek(&id).map(|confirmation| {
let mut request = confirmation.transaction;
// apply modification
if let Some(gas_price) = modification.gas_price {
request.gas_price = Some(gas_price.into());
}
let sender = request.from;
let result = unlock_sign_and_dispatch(&*client, &*miner, request, &*accounts, sender, pass);
if let Ok(ref hash) = result {
queue.request_confirmed(id, Ok(hash.clone()));
queue.peek(&id).map(|confirmation| {
let result = match confirmation.payload {
ConfirmationPayload::Transaction(mut request) => {
// apply modification
if let Some(gas_price) = modification.gas_price {
request.gas_price = gas_price.into();
}
unlock_sign_and_dispatch(&*client, &*miner, request.into(), &*accounts, pass)
},
ConfirmationPayload::Sign(address, hash) => {
signature_with_password(&*accounts, address, hash, pass)
}
};
if let Ok(ref response) = result {
queue.request_confirmed(id, Ok(response.clone()));
}
result
}).unwrap_or_else(|| Err(Error::invalid_params()))
@ -88,7 +96,7 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
)
}
fn reject_transaction(&self, params: Params) -> Result<Value, Error> {
fn reject_request(&self, params: Params) -> Result<Value, Error> {
try!(self.active());
from_params::<(U256, )>(params).and_then(
|(id, )| {


@ -121,7 +121,7 @@ impl EthTester {
&account_provider,
&miner_service,
&external_miner,
true
Default::default(),
);
let eth_sign = EthSigningUnsafeClient::new(
&client,


@ -181,8 +181,9 @@ impl MinerService for TestMinerService {
unimplemented!();
}
fn map_sealing_work<F, T>(&self, _chain: &MiningBlockChainClient, _f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
None
fn map_sealing_work<F, T>(&self, chain: &MiningBlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
let open_block = chain.prepare_open_block(self.author(), *self.gas_range_target.write(), self.extra_data());
Some(f(&open_block.close()))
}
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
@ -205,6 +206,10 @@ impl MinerService for TestMinerService {
self.last_nonces.read().get(address).cloned()
}
fn is_sealing(&self) -> bool {
false
}
/// Submit `seal` as a valid solution for the header of `pow_hash`.
/// Will check the seal, but not actually insert the block into the chain.
fn submit_seal(&self, _chain: &MiningBlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {


@ -17,10 +17,11 @@
use std::str::FromStr;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Instant, Duration};
use jsonrpc_core::IoHandler;
use util::hash::{Address, H256, FixedHash};
use util::numbers::{Uint, U256};
use util::RwLock;
use util::Mutex;
use ethcore::account_provider::AccountProvider;
use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionID};
use ethcore::log_entry::{LocalizedLogEntry, LogEntry};
@ -28,7 +29,7 @@ use ethcore::receipt::LocalizedReceipt;
use ethcore::transaction::{Transaction, Action};
use ethcore::miner::{ExternalMiner, MinerService};
use ethsync::SyncState;
use v1::{Eth, EthClient, EthSigning, EthSigningUnsafeClient};
use v1::{Eth, EthClient, EthClientOptions, EthSigning, EthSigningUnsafeClient};
use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService};
use rustc_serialize::hex::ToHex;
@ -57,19 +58,25 @@ struct EthTester {
pub sync: Arc<TestSyncProvider>,
pub accounts_provider: Arc<AccountProvider>,
pub miner: Arc<TestMinerService>,
hashrates: Arc<RwLock<HashMap<H256, U256>>>,
hashrates: Arc<Mutex<HashMap<H256, (Instant, U256)>>>,
pub io: IoHandler,
}
impl Default for EthTester {
fn default() -> Self {
Self::new_with_options(Default::default())
}
}
impl EthTester {
pub fn new_with_options(options: EthClientOptions) -> Self {
let client = blockchain_client();
let sync = sync_provider();
let ap = accounts_provider();
let miner = miner_service();
let hashrates = Arc::new(RwLock::new(HashMap::new()));
let hashrates = Arc::new(Mutex::new(HashMap::new()));
let external_miner = Arc::new(ExternalMiner::new(hashrates.clone()));
let eth = EthClient::new(&client, &sync, &ap, &miner, &external_miner, true).to_delegate();
let eth = EthClient::new(&client, &sync, &ap, &miner, &external_miner, options).to_delegate();
let sign = EthSigningUnsafeClient::new(&client, &ap, &miner).to_delegate();
let io = IoHandler::new();
io.add_delegate(eth);
@ -133,9 +140,9 @@ fn rpc_eth_syncing() {
#[test]
fn rpc_eth_hashrate() {
let tester = EthTester::default();
tester.hashrates.write().insert(H256::from(0), U256::from(0xfffa));
tester.hashrates.write().insert(H256::from(0), U256::from(0xfffb));
tester.hashrates.write().insert(H256::from(1), U256::from(0x1));
tester.hashrates.lock().insert(H256::from(0), (Instant::now() + Duration::from_secs(2), U256::from(0xfffa)));
tester.hashrates.lock().insert(H256::from(0), (Instant::now() + Duration::from_secs(2), U256::from(0xfffb)));
tester.hashrates.lock().insert(H256::from(1), (Instant::now() + Duration::from_secs(2), U256::from(0x1)));
let request = r#"{"jsonrpc": "2.0", "method": "eth_hashrate", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0xfffc","id":1}"#;
@ -158,8 +165,8 @@ fn rpc_eth_submit_hashrate() {
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(tester.io.handle_request(request), Some(response.to_owned()));
assert_eq!(tester.hashrates.read().get(&H256::from("0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c")).cloned(),
Some(U256::from(0x500_000)));
assert_eq!(tester.hashrates.lock().get(&H256::from("0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c")).cloned().unwrap().1,
U256::from(0x500_000));
}
#[test]
@ -210,16 +217,11 @@ fn rpc_eth_author() {
#[test]
fn rpc_eth_mining() {
let tester = EthTester::default();
tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap());
let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#;
assert_eq!(tester.io.handle_request(request), Some(response.to_owned()));
tester.hashrates.write().insert(H256::from(1), U256::from(0x1));
let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(tester.io.handle_request(request), Some(response.to_owned()));
}
#[test]
@ -299,7 +301,7 @@ fn rpc_eth_storage_at() {
"params": ["0x0000000000000000000000000000000000000001", "0x4", "latest"],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x07","id":1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000007","id":1}"#;
assert_eq!(tester.io.handle_request(request), Some(response.to_owned()));
}
@ -794,15 +796,26 @@ fn returns_no_work_if_cant_mine() {
}
#[test]
fn returns_error_if_can_mine_and_no_closed_block() {
use ethsync::{SyncState};
fn returns_correct_work_package() {
let eth_tester = EthTester::default();
eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap());
eth_tester.sync.status.write().state = SyncState::Idle;
let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#;
let response = r#"{"jsonrpc":"2.0","result":["0x3bbe93f74e7b97ae00784aeff8819c5cb600dd87e8b282a5d3446f3f871f0347","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x01"],"id":1}"#;
assert_eq!(eth_tester.io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn should_not_return_block_number() {
let eth_tester = EthTester::new_with_options(EthClientOptions {
allow_pending_receipt_query: true,
send_block_number_in_get_work: false,
});
eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap());
let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":["0x3bbe93f74e7b97ae00784aeff8819c5cb600dd87e8b282a5d3446f3f871f0347","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000"],"id":1}"#;
assert_eq!(eth_tester.io.handle_request(request), Some(response.to_owned()));
}

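The two getWork tests above expect a 3- or 4-element JSON array depending on send_block_number_in_get_work; that shape falls straight out of serializing a Rust tuple. A small sketch, assuming serde_json is available (the RPC layer itself goes through jsonrpc_core's to_value):

fn main() {
    let pow_hash = "0x3bbe93f74e7b97ae00784aeff8819c5cb600dd87e8b282a5d3446f3f871f0347";
    let seed_hash = "0x0000000000000000000000000000000000000000000000000000000000000000";
    let target = "0x0000800000000000000000000000000000000000000000000000000000000000";
    let block_number = "0x01";

    // A 4-tuple serializes as a 4-element JSON array (the new default work package)...
    let with_number = serde_json::to_string(&(pow_hash, seed_hash, target, block_number)).unwrap();
    assert!(with_number.ends_with(r#","0x01"]"#));

    // ...and the 3-tuple form matches what geth-compatible clients expect.
    let without_number = serde_json::to_string(&(pow_hash, seed_hash, target)).unwrap();
    assert_eq!(
        without_number,
        format!(r#"["{}","{}","{}"]"#, pow_hash, seed_hash, target)
    );
}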

@ -16,13 +16,14 @@
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use jsonrpc_core::IoHandler;
use v1::impls::EthSigningQueueClient;
use v1::traits::EthSigning;
use v1::helpers::{ConfirmationsQueue, SigningQueue};
use v1::tests::helpers::TestMinerService;
use util::{Address, FixedHash};
use util::numbers::{Uint, U256};
use util::numbers::{Uint, U256, H256};
use ethcore::account_provider::AccountProvider;
use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Transaction, Action};
@ -37,7 +38,7 @@ struct EthSigningTester {
impl Default for EthSigningTester {
fn default() -> Self {
let queue = Arc::new(ConfirmationsQueue::default());
let queue = Arc::new(ConfirmationsQueue::with_timeout(Duration::from_millis(1)));
let client = Arc::new(TestBlockChainClient::default());
let miner = Arc::new(TestMinerService::default());
let accounts = Arc::new(AccountProvider::transient_provider());
@ -58,6 +59,78 @@ fn eth_signing() -> EthSigningTester {
EthSigningTester::default()
}
#[test]
fn should_add_sign_to_queue() {
// given
let tester = eth_signing();
let address = Address::random();
assert_eq!(tester.queue.requests().len(), 0);
// when
let request = r#"{
"jsonrpc": "2.0",
"method": "eth_sign",
"params": [
""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"0x0000000000000000000000000000000000000000000000000000000000000005"
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","id":1}"#;
// then
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
assert_eq!(tester.queue.requests().len(), 1);
}
#[test]
fn should_post_sign_to_queue() {
// given
let tester = eth_signing();
let address = Address::random();
assert_eq!(tester.queue.requests().len(), 0);
// when
let request = r#"{
"jsonrpc": "2.0",
"method": "eth_postSign",
"params": [
""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"0x0000000000000000000000000000000000000000000000000000000000000005"
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x01","id":1}"#;
// then
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
assert_eq!(tester.queue.requests().len(), 1);
}
#[test]
fn should_sign_if_account_is_unlocked() {
// given
let tester = eth_signing();
let hash: H256 = 5.into();
let acc = tester.accounts.new_account("test").unwrap();
tester.accounts.unlock_account_permanently(acc, "test".into()).unwrap();
let signature = tester.accounts.sign(acc, hash).unwrap();
// when
let request = r#"{
"jsonrpc": "2.0",
"method": "eth_sign",
"params": [
""#.to_owned() + format!("0x{:?}", acc).as_ref() + r#"",
""# + format!("0x{:?}", hash).as_ref() + r#""
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", signature).as_ref() + r#"","id":1}"#;
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
assert_eq!(tester.queue.requests().len(), 0);
}
#[test]
fn should_add_transaction_to_queue() {
@ -81,7 +154,6 @@ fn should_add_transaction_to_queue() {
}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000000000000000000000000000000","id":1}"#;
// then
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
assert_eq!(tester.queue.requests().len(), 1);


@ -110,7 +110,7 @@ fn new_account() {
let res = tester.io.handle_request(request);
let accounts = tester.accounts.accounts();
let accounts = tester.accounts.accounts().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts[0];
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"","id":1}"#;
@ -122,7 +122,7 @@ fn new_account() {
fn should_be_able_to_get_account_info() {
let tester = setup(None);
tester.accounts.new_account("").unwrap();
let accounts = tester.accounts.accounts();
let accounts = tester.accounts.accounts().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts[0];
@ -140,7 +140,7 @@ fn should_be_able_to_get_account_info() {
fn should_be_able_to_set_name() {
let tester = setup(None);
tester.accounts.new_account("").unwrap();
let accounts = tester.accounts.accounts();
let accounts = tester.accounts.accounts().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts[0];
@ -161,7 +161,7 @@ fn should_be_able_to_set_name() {
fn should_be_able_to_set_meta() {
let tester = setup(None);
tester.accounts.new_account("").unwrap();
let accounts = tester.accounts.accounts();
let accounts = tester.accounts.accounts().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts[0];


@ -23,7 +23,7 @@ use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Transaction, Action};
use v1::{SignerClient, PersonalSigner};
use v1::tests::helpers::TestMinerService;
use v1::helpers::{SigningQueue, ConfirmationsQueue, TransactionRequest};
use v1::helpers::{SigningQueue, ConfirmationsQueue, FilledTransactionRequest, ConfirmationPayload};
struct PersonalSignerTester {
queue: Arc<ConfirmationsQueue>,
@ -68,22 +68,28 @@ fn signer_tester() -> PersonalSignerTester {
#[test]
fn should_return_list_of_transactions_in_queue() {
fn should_return_list_of_items_to_confirm() {
// given
let tester = signer_tester();
tester.queue.add_request(TransactionRequest {
tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
from: Address::from(1),
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from(10_000)),
gas: Some(U256::from(10_000_000)),
value: Some(U256::from(1)),
data: None,
gas_price: U256::from(10_000),
gas: U256::from(10_000_000),
value: U256::from(1),
data: vec![],
nonce: None,
});
}));
tester.queue.add_request(ConfirmationPayload::Sign(1.into(), 5.into()));
// when
let request = r#"{"jsonrpc":"2.0","method":"personal_transactionsToConfirm","params":[],"id":1}"#;
let response = r#"{"jsonrpc":"2.0","result":[{"id":"0x01","transaction":{"data":null,"from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x01"}}],"id":1}"#;
let request = r#"{"jsonrpc":"2.0","method":"personal_requestsToConfirm","params":[],"id":1}"#;
let response = concat!(
r#"{"jsonrpc":"2.0","result":["#,
r#"{"id":"0x01","payload":{"transaction":{"data":"0x","from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x01"}}},"#,
r#"{"id":"0x02","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","hash":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#,
r#"],"id":1}"#
);
// then
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
@ -94,19 +100,19 @@ fn should_return_list_of_transactions_in_queue() {
fn should_reject_transaction_from_queue_without_dispatching() {
// given
let tester = signer_tester();
tester.queue.add_request(TransactionRequest {
tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
from: Address::from(1),
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from(10_000)),
gas: Some(U256::from(10_000_000)),
value: Some(U256::from(1)),
data: None,
gas_price: U256::from(10_000),
gas: U256::from(10_000_000),
value: U256::from(1),
data: vec![],
nonce: None,
});
}));
assert_eq!(tester.queue.requests().len(), 1);
// when
let request = r#"{"jsonrpc":"2.0","method":"personal_rejectTransaction","params":["0x01"],"id":1}"#;
let request = r#"{"jsonrpc":"2.0","method":"personal_rejectRequest","params":["0x01"],"id":1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
// then
@ -119,19 +125,35 @@ fn should_reject_transaction_from_queue_without_dispatching() {
fn should_not_remove_transaction_if_password_is_invalid() {
// given
let tester = signer_tester();
tester.queue.add_request(TransactionRequest {
tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
from: Address::from(1),
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from(10_000)),
gas: Some(U256::from(10_000_000)),
value: Some(U256::from(1)),
data: None,
gas_price: U256::from(10_000),
gas: U256::from(10_000_000),
value: U256::from(1),
data: vec![],
nonce: None,
});
}));
assert_eq!(tester.queue.requests().len(), 1);
// when
let request = r#"{"jsonrpc":"2.0","method":"personal_confirmTransaction","params":["0x01",{},"xxx"],"id":1}"#;
let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x01",{},"xxx"],"id":1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#;
// then
assert_eq!(tester.io.handle_request(&request), Some(response.to_owned()));
assert_eq!(tester.queue.requests().len(), 1);
}
#[test]
fn should_not_remove_sign_if_password_is_invalid() {
// given
let tester = signer_tester();
tester.queue.add_request(ConfirmationPayload::Sign(0.into(), 5.into()));
assert_eq!(tester.queue.requests().len(), 1);
// when
let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x01",{},"xxx"],"id":1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32021,"message":"Account password is invalid or account does not exist.","data":"SStore(InvalidAccount)"},"id":1}"#;
// then
@ -145,15 +167,15 @@ fn should_confirm_transaction_and_dispatch() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.queue.add_request(TransactionRequest {
tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
from: address,
to: Some(recipient),
gas_price: Some(U256::from(10_000)),
gas: Some(U256::from(10_000_000)),
value: Some(U256::from(1)),
data: None,
gas_price: U256::from(10_000),
gas: U256::from(10_000_000),
value: U256::from(1),
data: vec![],
nonce: None,
});
}));
let t = Transaction {
nonce: U256::zero(),
@ -172,7 +194,7 @@ fn should_confirm_transaction_and_dispatch() {
// when
let request = r#"{
"jsonrpc":"2.0",
"method":"personal_confirmTransaction",
"method":"personal_confirmRequest",
"params":["0x01", {"gasPrice":"0x1000"}, "test"],
"id":1
}"#;


@ -206,26 +206,31 @@ pub trait EthSigning: Sized + Send + Sync + 'static {
/// Signs the data with given address signature.
fn sign(&self, _: Params) -> Result<Value, Error>;
/// Posts sign request asynchronously.
/// Will return a confirmation ID for later use with check_transaction.
fn post_sign(&self, _: Params) -> Result<Value, Error>;
/// Sends transaction; will block for 20s to try to return the
/// transaction hash.
/// If it cannot yet be signed, it will return a transaction ID for
/// later use with check_transaction.
/// later use with check_transaction.
fn send_transaction(&self, _: Params) -> Result<Value, Error>;
/// Posts transaction asynchronously.
/// Will return a transaction ID for later use with check_transaction.
/// Will return a transaction ID for later use with check_transaction.
fn post_transaction(&self, _: Params) -> Result<Value, Error>;
/// Checks the progress of a previously posted transaction.
/// Should be given a valid send_transaction ID.
/// Returns the transaction hash, the zero hash (not yet available),
/// or an error.
/// or an error.
fn check_transaction(&self, _: Params) -> Result<Value, Error>;
/// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self));
delegate.add_method("eth_sign", EthSigning::sign);
delegate.add_method("eth_postSign", EthSigning::post_sign);
delegate.add_method("eth_sendTransaction", EthSigning::send_transaction);
delegate.add_method("eth_postTransaction", EthSigning::post_transaction);
delegate.add_method("eth_checkTransaction", EthSigning::check_transaction);


@ -61,24 +61,24 @@ pub trait Personal: Sized + Send + Sync + 'static {
}
}
/// Personal extension for transactions confirmations rpc interface.
/// Personal extension for confirmations rpc interface.
pub trait PersonalSigner: Sized + Send + Sync + 'static {
/// Returns a list of transactions to confirm.
fn transactions_to_confirm(&self, _: Params) -> Result<Value, Error>;
/// Returns a list of items to confirm.
fn requests_to_confirm(&self, _: Params) -> Result<Value, Error>;
/// Confirm and send a specific transaction.
fn confirm_transaction(&self, _: Params) -> Result<Value, Error>;
/// Confirm specific request.
fn confirm_request(&self, _: Params) -> Result<Value, Error>;
/// Reject the transaction request.
fn reject_transaction(&self, _: Params) -> Result<Value, Error>;
/// Reject the confirmation request.
fn reject_request(&self, _: Params) -> Result<Value, Error>;
/// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self));
delegate.add_method("personal_transactionsToConfirm", PersonalSigner::transactions_to_confirm);
delegate.add_method("personal_confirmTransaction", PersonalSigner::confirm_transaction);
delegate.add_method("personal_rejectTransaction", PersonalSigner::reject_transaction);
delegate.add_method("personal_requestsToConfirm", PersonalSigner::requests_to_confirm);
delegate.add_method("personal_confirmRequest", PersonalSigner::confirm_request);
delegate.add_method("personal_rejectRequest", PersonalSigner::reject_request);
delegate
}
}


@ -0,0 +1,150 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Types used in Confirmations queue (Trusted Signer)
use v1::types::{U256, TransactionRequest, H160, H256};
use v1::helpers;
/// Confirmation waiting in a queue
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)]
pub struct ConfirmationRequest {
/// Id of this confirmation
pub id: U256,
/// Payload
pub payload: ConfirmationPayload,
}
impl From<helpers::ConfirmationRequest> for ConfirmationRequest {
fn from(c: helpers::ConfirmationRequest) -> Self {
ConfirmationRequest {
id: c.id.into(),
payload: c.payload.into(),
}
}
}
/// Sign request
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)]
pub struct SignRequest {
/// Address
pub address: H160,
/// Hash to sign
pub hash: H256,
}
/// Confirmation payload, i.e. the thing to be confirmed
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)]
pub enum ConfirmationPayload {
/// Transaction
#[serde(rename="transaction")]
Transaction(TransactionRequest),
/// Signature
#[serde(rename="sign")]
Sign(SignRequest),
}
impl From<helpers::ConfirmationPayload> for ConfirmationPayload {
fn from(c: helpers::ConfirmationPayload) -> Self {
match c {
helpers::ConfirmationPayload::Transaction(t) => ConfirmationPayload::Transaction(t.into()),
helpers::ConfirmationPayload::Sign(address, hash) => ConfirmationPayload::Sign(SignRequest {
address: address.into(),
hash: hash.into(),
}),
}
}
}
/// Possible modifications to the confirmed transaction sent by `Trusted Signer`
#[derive(Debug, PartialEq, Deserialize)]
pub struct TransactionModification {
/// Modified gas price
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use serde_json;
use v1::types::U256;
use v1::helpers;
use super::*;
#[test]
fn should_serialize_sign_confirmation() {
// given
let request = helpers::ConfirmationRequest {
id: 15.into(),
payload: helpers::ConfirmationPayload::Sign(1.into(), 5.into()),
};
// when
let res = serde_json::to_string(&ConfirmationRequest::from(request));
let expected = r#"{"id":"0x0f","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","hash":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#;
// then
assert_eq!(res.unwrap(), expected.to_owned());
}
#[test]
fn should_serialize_transaction_confirmation() {
// given
let request = helpers::ConfirmationRequest {
id: 15.into(),
payload: helpers::ConfirmationPayload::Transaction(helpers::FilledTransactionRequest {
from: 0.into(),
to: None,
gas: 15_000.into(),
gas_price: 10_000.into(),
value: 100_000.into(),
data: vec![1, 2, 3],
nonce: Some(1.into()),
}),
};
// when
let res = serde_json::to_string(&ConfirmationRequest::from(request));
let expected = r#"{"id":"0x0f","payload":{"transaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x0186a0","data":"0x010203","nonce":"0x01"}}}"#;
// then
assert_eq!(res.unwrap(), expected.to_owned());
}
#[test]
fn should_deserialize_modification() {
// given
let s1 = r#"{
"gasPrice":"0x0ba43b7400"
}"#;
let s2 = r#"{}"#;
// when
let res1: TransactionModification = serde_json::from_str(s1).unwrap();
let res2: TransactionModification = serde_json::from_str(s2).unwrap();
// then
assert_eq!(res1, TransactionModification {
gas_price: Some(U256::from_str("0ba43b7400").unwrap()),
});
assert_eq!(res2, TransactionModification {
gas_price: None,
});
}
}
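
A minimal sketch of how these wire types are intended to be consumed, assuming only what the `From` impls above provide: internal `helpers::ConfirmationRequest` values taken from the signer queue are converted into the serializable RPC type and encoded with serde_json. This is an editor's illustration, not part of the commit.

// Editor's sketch (not part of the commit): converting queued internal
// requests into the serializable RPC type defined above.
use v1::helpers;
use v1::types::ConfirmationRequest;

fn pending_as_json(pending: Vec<helpers::ConfirmationRequest>) -> String {
    // Each internal request maps through the `From` impls above.
    let wire: Vec<ConfirmationRequest> = pending.into_iter().map(Into::into).collect();
    // `ConfirmationRequest` derives `Serialize`, so serde_json can encode it directly.
    serde_json::to_string(&wire).expect("derived Serialize impl never fails; qed")
}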


@@ -17,6 +17,8 @@
mod bytes;
mod block;
mod block_number;
mod call_request;
mod confirmations;
mod filter;
mod hash;
mod index;
@@ -24,7 +26,6 @@ mod log;
mod sync;
mod transaction;
mod transaction_request;
mod call_request;
mod receipt;
mod trace;
mod trace_filter;
@@ -33,14 +34,15 @@ mod uint;
pub use self::bytes::Bytes;
pub use self::block::{Block, BlockTransactions};
pub use self::block_number::BlockNumber;
pub use self::call_request::CallRequest;
pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, TransactionModification};
pub use self::filter::Filter;
pub use self::hash::{H64, H160, H256, H520, H2048};
pub use self::index::Index;
pub use self::log::Log;
pub use self::sync::{SyncStatus, SyncInfo};
pub use self::transaction::Transaction;
pub use self::transaction_request::{TransactionRequest, TransactionConfirmation, TransactionModification};
pub use self::call_request::CallRequest;
pub use self::transaction_request::TransactionRequest;
pub use self::receipt::Receipt;
pub use self::trace::{LocalizedTrace, TraceResults};
pub use self::trace_filter::TraceFilter;
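
With the re-exports above in place, downstream RPC code can take the confirmation types straight from `v1::types`. A minimal sketch of the intended import path (editor's illustration, not part of the diff):

// Editor's sketch (not part of the commit): the confirmation types are now
// reachable through the flat `v1::types` re-exports.
use v1::types::{ConfirmationPayload, ConfirmationRequest};

fn describe(request: &ConfirmationRequest) -> &'static str {
    match request.payload {
        ConfirmationPayload::Transaction(_) => "transaction awaiting confirmation",
        ConfirmationPayload::Sign(_) => "hash awaiting signature",
    }
}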


@@ -17,7 +17,7 @@
//! `TransactionRequest` type
use v1::types::{Bytes, H160, U256};
use v1::helpers::{TransactionRequest as Request, TransactionConfirmation as Confirmation};
use v1::helpers;
/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)]
@@ -39,8 +39,8 @@ pub struct TransactionRequest {
pub nonce: Option<U256>,
}
impl From<Request> for TransactionRequest {
fn from(r: Request) -> Self {
impl From<helpers::TransactionRequest> for TransactionRequest {
fn from(r: helpers::TransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
@@ -53,9 +53,23 @@ impl From<Request> for TransactionRequest {
}
}
impl Into<Request> for TransactionRequest {
fn into(self) -> Request {
Request {
impl From<helpers::FilledTransactionRequest> for TransactionRequest {
fn from(r: helpers::FilledTransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: Some(r.gas_price.into()),
gas: Some(r.gas.into()),
value: Some(r.value.into()),
data: Some(r.data.into()),
nonce: r.nonce.map(Into::into),
}
}
}
impl Into<helpers::TransactionRequest> for TransactionRequest {
fn into(self) -> helpers::TransactionRequest {
helpers::TransactionRequest {
from: self.from.into(),
to: self.to.map(Into::into),
gas_price: self.gas_price.map(Into::into),
@@ -67,32 +81,6 @@ impl Into<Request> for TransactionRequest {
}
}
/// Transaction confirmation waiting in a queue
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize)]
pub struct TransactionConfirmation {
/// Id of this confirmation
pub id: U256,
/// TransactionRequest
pub transaction: TransactionRequest,
}
impl From<Confirmation> for TransactionConfirmation {
fn from(c: Confirmation) -> Self {
TransactionConfirmation {
id: c.id.into(),
transaction: c.transaction.into(),
}
}
}
/// Possible modifications to the confirmed transaction sent by `SignerUI`
#[derive(Debug, PartialEq, Deserialize)]
pub struct TransactionModification {
/// Modified gas price
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
}
#[cfg(test)]
mod tests {
@@ -188,7 +176,6 @@ mod tests {
});
}
#[test]
fn transaction_request_deserialize_error() {
let s = r#"{
@@ -203,26 +190,5 @@
assert!(deserialized.is_err(), "Should be error because to is empty");
}
#[test]
fn should_deserialize_modification() {
// given
let s1 = r#"{
"gasPrice":"0x0ba43b7400"
}"#;
let s2 = r#"{}"#;
// when
let res1: TransactionModification = serde_json::from_str(s1).unwrap();
let res2: TransactionModification = serde_json::from_str(s2).unwrap();
// then
assert_eq!(res1, TransactionModification {
gas_price: Some(U256::from_str("0ba43b7400").unwrap()),
});
assert_eq!(res2, TransactionModification {
gas_price: None,
});
}
}
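
A sketch of the two directions the conversions above are meant to cover (editor's illustration, not part of the diff): an incoming RPC `TransactionRequest` becomes the internal `helpers::TransactionRequest` with its optional fields preserved, while a `helpers::FilledTransactionRequest` comes back to the RPC type with every field populated except possibly the nonce.

// Editor's sketch (not part of the commit): exercising the conversions
// defined in this file.
use v1::helpers;
use v1::types::TransactionRequest;

fn to_internal(rpc: TransactionRequest) -> helpers::TransactionRequest {
    // `Into<helpers::TransactionRequest>`: optional fields stay optional.
    rpc.into()
}

fn to_rpc(filled: helpers::FilledTransactionRequest) -> TransactionRequest {
    // `From<helpers::FilledTransactionRequest>`: gas, gas price, value and
    // data are always `Some(..)`; the nonce stays optional.
    filled.into()
}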


@@ -12,4 +12,6 @@ export TARGETS="
-p ethstore \
-p ethsync \
-p ethcore-ipc \
-p ethcore-ipc-tests \
-p ethcore-ipc-nano \
-p parity"


@@ -53,6 +53,7 @@ pub struct ServerBuilder {
queue: Arc<ConfirmationsQueue>,
handler: Arc<IoHandler>,
authcodes_path: PathBuf,
skip_origin_validation: bool,
}
impl Extendable for ServerBuilder {
@@ -68,13 +69,21 @@ impl ServerBuilder {
queue: queue,
handler: Arc::new(IoHandler::new()),
authcodes_path: authcodes_path,
skip_origin_validation: false,
}
}
/// If set to `true`, the server will not verify the Origin of incoming requests.
/// Not recommended; use only for development.
pub fn skip_origin_validation(mut self, skip: bool) -> Self {
self.skip_origin_validation = skip;
self
}
/// Starts a new `WebSocket` server in a separate thread.
/// Returns a `Server` handle which closes the server when dropped.
pub fn start(self, addr: SocketAddr) -> Result<Server, ServerError> {
Server::start(addr, self.handler, self.queue, self.authcodes_path)
Server::start(addr, self.handler, self.queue, self.authcodes_path, self.skip_origin_validation)
}
}
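
A hedged usage sketch of the new flag; the exact shape of `ServerBuilder::new` is assumed from the fields initialised above and may differ, and the port and path are placeholders.

// Editor's sketch (not part of the commit). Assumes `ServerBuilder::new`
// takes the confirmations queue and the authcodes path, as the fields
// initialised above suggest.
use std::path::PathBuf;
use std::sync::Arc;

fn start_dev_signer(queue: Arc<ConfirmationsQueue>, authcodes: PathBuf) -> Result<Server, ServerError> {
    ServerBuilder::new(queue, authcodes)
        // Development only: accept requests regardless of their Origin header.
        .skip_origin_validation(true)
        .start("127.0.0.1:8180".parse().expect("hard-coded address is valid; qed"))
}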
@@ -89,10 +98,10 @@ pub struct Server {
impl Server {
/// Starts a new `WebSocket` server in a separate thread.
/// Returns a `Server` handle which closes the server when dropped.
fn start(addr: SocketAddr, handler: Arc<IoHandler>, queue: Arc<ConfirmationsQueue>, authcodes_path: PathBuf) -> Result<Server, ServerError> {
fn start(addr: SocketAddr, handler: Arc<IoHandler>, queue: Arc<ConfirmationsQueue>, authcodes_path: PathBuf, skip_origin_validation: bool) -> Result<Server, ServerError> {
let config = {
let mut config = ws::Settings::default();
// It's also used for handling min-sysui requests (the browser can make many of them in parallel)
// Accept only handshakes beginning with GET.
config.method_strict = true;
// Was shutting down server when suspending on linux:
config.shutdown_on_interrupt = false;
@@ -101,7 +110,9 @@ impl Server {
// Create WebSocket
let origin = format!("{}", addr);
let ws = try!(ws::Builder::new().with_settings(config).build(session::Factory::new(handler, origin, authcodes_path)));
let ws = try!(ws::Builder::new().with_settings(config).build(
session::Factory::new(handler, origin, authcodes_path, skip_origin_validation)
));
let panic_handler = PanicHandler::new_in_arc();
let ph = panic_handler.clone();

Some files were not shown because too many files have changed in this diff.