Merge branch 'master' into td-ui-2

Tomasz Drwięga 2017-10-18 16:45:37 +02:00
commit 195305ce2e
No known key found for this signature in database
GPG Key ID: D066F497E62CAF66
49 changed files with 280 additions and 310 deletions

Cargo.lock generated
View File

@ -539,6 +539,7 @@ dependencies = [
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
"journaldb 0.1.0",
"kvdb 0.1.0",
"kvdb-memorydb 0.1.0",
"kvdb-rocksdb 0.1.0",
@ -772,6 +773,7 @@ dependencies = [
"hash 0.1.0",
"hashdb 0.1.0",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"journaldb 0.1.0",
"kvdb 0.1.0",
"kvdb-memorydb 0.1.0",
"libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1252,6 +1254,25 @@ name = "itoa"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "journaldb"
version = "0.1.0"
dependencies = [
"ethcore-bigint 0.1.3",
"ethcore-bytes 0.1.0",
"ethcore-logger 1.9.0",
"hash 0.1.0",
"hashdb 0.1.0",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0",
"kvdb-memorydb 0.1.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"memorydb 0.1.0",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.2.0",
"util-error 0.1.0",
]
[[package]]
name = "jsonrpc-core"
version = "8.0.0"
@ -1912,7 +1933,7 @@ dependencies = [
[[package]]
name = "parity"
version = "1.8.0"
version = "1.9.0"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1941,6 +1962,7 @@ dependencies = [
"hash 0.1.0",
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
"isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"journaldb 0.1.0",
"jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"kvdb 0.1.0",
"kvdb-rocksdb 0.1.0",
@ -2137,6 +2159,7 @@ dependencies = [
"ethstore 0.1.0",
"ethsync 1.9.0",
"fetch 0.1.0",
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hardware-wallet 1.9.0",
"hash 0.1.0",
@ -2242,7 +2265,7 @@ dependencies = [
[[package]]
name = "parity-ui-precompiled"
version = "1.4.0"
source = "git+https://github.com/paritytech/js-precompiled.git#4b77a23c3e55aed45725f43cd2a499676375b995"
source = "git+https://github.com/paritytech/js-precompiled.git#fe0b4dbdfe6e7ebebb247d565d937fd0a0feca5f"
dependencies = [
"parity-dapps-glue 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

View File

@ -1,7 +1,7 @@
[package]
description = "Parity Ethereum client"
name = "parity"
version = "1.8.0"
version = "1.9.0"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
@ -62,6 +62,7 @@ hash = { path = "util/hash" }
migration = { path = "util/migration" }
kvdb = { path = "util/kvdb" }
kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
journaldb = { path = "util/journaldb" }
parity-dapps = { path = "dapps", optional = true }
clippy = { version = "0.0.103", optional = true}

View File

@ -13,7 +13,7 @@ futures = "0.1"
futures-cpupool = "0.1"
linked-hash-map = "0.5"
log = "0.3"
parity-dapps-glue = "1.8"
parity-dapps-glue = "1.9"
parking_lot = "0.4"
mime_guess = "2.0.0-alpha.2"
rand = "0.3"

View File

@ -71,6 +71,7 @@ hash = { path = "../util/hash" }
triehash = { path = "../util/triehash" }
semantic_version = { path = "../util/semantic_version" }
unexpected = { path = "../util/unexpected" }
journaldb = { path = "../util/journaldb" }
[dev-dependencies]
native-contracts = { path = "native_contracts", features = ["test_contracts"] }

@ -1 +1 @@
Subproject commit 9b722a014a2b2c9ea6eac456fe01a5c3dd1042a8
Subproject commit b6011c3fb567d7178915574de0a8d4b5331fe725

View File

@ -26,7 +26,8 @@ use itertools::Itertools;
use hash::keccak;
use timer::PerfTimer;
use bytes::Bytes;
use util::{Address, journaldb, DBValue};
use util::{Address, DBValue};
use journaldb;
use util_error::UtilError;
use trie::{TrieSpec, TrieFactory, Trie};
use kvdb::{KeyValueDB, DBTransaction};

View File

@ -20,7 +20,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
use mode::Mode as IpcMode;
use verification::{VerifierType, QueueConfig};
use util::journaldb;
use journaldb;
use kvdb_rocksdb::CompactionProfile;
pub use std::time::Duration;

View File

@ -20,7 +20,7 @@ use std::fmt;
use std::sync::Arc;
use bigint::prelude::U256;
use bigint::hash::H256;
use util::journaldb;
use journaldb;
use {trie, kvdb_memorydb, bytes};
use kvdb::{self, KeyValueDB};
use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};

View File

@ -26,7 +26,8 @@ use hash::keccak;
use bigint::prelude::U256;
use bigint::hash::H256;
use parking_lot::RwLock;
use util::*;
use journaldb;
use util::{Address, DBValue};
use kvdb_rocksdb::{Database, DatabaseConfig};
use bytes::Bytes;
use rlp::*;

View File

@ -131,6 +131,7 @@ extern crate vm;
extern crate wasm;
extern crate ethcore_util as util;
extern crate memory_cache;
extern crate journaldb;
#[macro_use]
extern crate macros;
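The same three-step wiring recurs in every crate this change touches: a path dependency in the crate's Cargo.toml, an `extern crate` declaration in the crate root (as above), and `use` paths that name `journaldb` directly instead of util's former re-export. A condensed sketch of the pattern, with the Cargo.toml line shown as a comment (the relative path below is the one ethcore uses; other crates differ only in that path):

// Cargo.toml: journaldb = { path = "../util/journaldb" }

// crate root (lib.rs or main.rs):
extern crate journaldb;

// call sites, instead of the old `use util::journaldb::...`:
use journaldb::{Algorithm, JournalDB};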

View File

@ -23,7 +23,7 @@ use trie::TrieDB;
use views::HeaderView;
use bloom_journal::Bloom;
use migration::{Error, Migration, Progress, Batch, Config, ErrorKind};
use util::journaldb;
use journaldb;
use bigint::hash::H256;
use trie::Trie;
use kvdb::{DBTransaction, ResultExt};

View File

@ -36,7 +36,7 @@ use util::{HashDB, DBValue};
use snappy;
use bytes::Bytes;
use parking_lot::Mutex;
use util::journaldb::{self, Algorithm, JournalDB};
use journaldb::{self, Algorithm, JournalDB};
use kvdb::KeyValueDB;
use trie::{TrieDB, TrieDBMut, Trie, TrieMut};
use rlp::{RlpStream, UntrustedRlp};

View File

@ -39,7 +39,7 @@ use bigint::hash::H256;
use parking_lot::{Mutex, RwLock, RwLockReadGuard};
use util_error::UtilError;
use bytes::Bytes;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use kvdb_rocksdb::{Database, DatabaseConfig};
use snappy;
@ -625,7 +625,7 @@ mod tests {
use io::{IoService};
use devtools::RandomTempPath;
use tests::helpers::get_test_spec;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use error::Error;
use snapshot::{ManifestData, RestorationStatus, SnapshotService};
use super::*;

View File

@ -35,7 +35,7 @@ use util::DBValue;
use kvdb::KeyValueDB;
use bigint::hash::H256;
use hashdb::HashDB;
use util::journaldb;
use journaldb;
use trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};
use trie::{TrieDB, TrieDBMut, Trie};

View File

@ -69,7 +69,7 @@ fn restored_is_equivalent() {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: db_config,
pruning: ::util::journaldb::Algorithm::Archive,
pruning: ::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path,
db_restore: client2.clone(),
@ -112,7 +112,7 @@ fn guards_delete_folders() {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
pruning: ::util::journaldb::Algorithm::Archive,
pruning: ::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path.clone(),
db_restore: Arc::new(NoopDBRestore),

View File

@ -26,7 +26,7 @@ use error::Error;
use rand::{XorShiftRng, SeedableRng};
use bigint::hash::H256;
use util::journaldb::{self, Algorithm};
use journaldb::{self, Algorithm};
use kvdb_rocksdb::{Database, DatabaseConfig};
use memorydb::MemoryDB;
use parking_lot::Mutex;

View File

@ -670,7 +670,7 @@ impl Spec {
/// constructor.
pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
use transaction::{Action, Transaction};
use util::journaldb;
use journaldb;
use kvdb_memorydb;
let genesis = self.genesis_header();

View File

@ -18,7 +18,7 @@ use std::collections::{VecDeque, HashSet};
use std::sync::Arc;
use lru_cache::LruCache;
use memory_cache::MemoryLruCache;
use util::journaldb::JournalDB;
use journaldb::JournalDB;
use kvdb::{KeyValueDB, DBTransaction};
use bigint::hash::H256;
use hashdb::HashDB;

View File

@ -36,7 +36,6 @@ use state_db::StateDB;
use state::*;
use std::sync::Arc;
use transaction::{Action, Transaction, SignedTransaction};
use util::*;
use views::BlockView;
// TODO: move everything over to get_null_spec.
@ -282,7 +281,7 @@ pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::Sta
pub fn get_temp_state_db() -> StateDB {
let db = new_db();
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
let journal_db = ::journaldb::new(db, ::journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
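For a self-contained view of what `get_temp_state_db` now does, here is a minimal sketch that builds a journal DB over the in-memory backend used by the journaldb tests elsewhere in this diff. The `None` column argument is an assumption standing in for ethcore's `::db::COL_STATE`, and `is_empty()` is assumed to be available on the `JournalDB` trait; treat this as an illustration rather than a drop-in test.

extern crate journaldb;
extern crate kvdb_memorydb;

use std::sync::Arc;

fn main() {
    // zero-column in-memory backing, as the journaldb unit tests create it
    let backing = Arc::new(kvdb_memorydb::create(0));
    // same constructor the helper above calls, here with EarlyMerge pruning and no column
    let journal_db = journaldb::new(backing, journaldb::Algorithm::EarlyMerge, None);
    // a freshly created journal has nothing journalled yet (assumed trait method)
    assert!(journal_db.is_empty());
}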

View File

@ -1 +1 @@
// test script 11
// test script 12

View File

@ -17,7 +17,7 @@
use std::fs;
use std::path::{PathBuf, Path};
use bigint::hash::{H64, H256};
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use helpers::{replace_home, replace_home_and_local};
use app_dirs::{AppInfo, get_app_root, AppDataType};

View File

@ -22,7 +22,7 @@ use bigint::prelude::U256;
use bigint::hash::clean_0x;
use util::Address;
use kvdb_rocksdb::CompactionProfile;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
use cache::CacheConfig;

View File

@ -76,6 +76,7 @@ extern crate path;
extern crate rpc_cli;
extern crate node_filter;
extern crate hash;
extern crate journaldb;
#[macro_use]
extern crate log as rlog;

View File

@ -20,7 +20,7 @@ use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use std::sync::Arc;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use migr::{self, Manager as MigrationManager, Config as MigrationConfig, Migration};
use kvdb;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
@ -282,7 +282,6 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
mod legacy {
use super::*;
use std::path::{Path, PathBuf};
use util::journaldb::Algorithm;
use migr::{Manager as MigrationManager};
use kvdb_rocksdb::CompactionProfile;
use ethcore::migrations;

View File

@ -18,7 +18,7 @@ use std::{str, fs, fmt};
use std::time::Duration;
use bigint::prelude::U256;
use util::{Address, version_data};
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use ethcore::spec::{Spec, SpecParams};
use ethcore::ethereum;
use ethcore::client::Mode;
@ -326,7 +326,7 @@ pub fn mode_switch_to_bool(switch: Option<Mode>, user_defaults: &UserDefaults) -
#[cfg(test)]
mod tests {
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use user_defaults::UserDefaults;
use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool};

View File

@ -244,7 +244,7 @@ impl FullDependencies {
let deps = &$deps;
let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone());
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store)))
$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &deps.secret_store)))
} else {
$handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
}
@ -445,7 +445,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
let secret_store = Some(deps.secret_store.clone());
if deps.signer_service.is_enabled() {
$handler.extend_with($namespace::to_delegate(
SigningQueueClient::new(&deps.signer_service, dispatcher, &secret_store)
SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &secret_store)
))
} else {
$handler.extend_with(

View File

@ -42,7 +42,7 @@ use ansi_term::Colour;
use util::version;
use parking_lot::{Condvar, Mutex};
use node_filter::NodeFilter;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use params::{
SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,

View File

@ -25,7 +25,7 @@ use std::io::{Read, Write};
use std::path::{PathBuf, Path};
use dir::{DatabaseDirectories, default_data_path};
use helpers::replace_home;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
#[cfg_attr(feature="dev", allow(enum_variant_names))]
#[derive(Debug)]

View File

@ -26,7 +26,7 @@ use serde::de::value::MapAccessDeserializer;
use serde_json::Value;
use serde_json::de::from_reader;
use serde_json::ser::to_string;
use util::journaldb::Algorithm;
use journaldb::Algorithm;
use ethcore::client::Mode;
pub struct UserDefaults {

View File

@ -10,6 +10,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ansi_term = "0.9"
cid = "0.2"
futures = "0.1.6"
futures-cpupool = "0.1"
log = "0.3"
multihash ="0.6"

View File

@ -20,6 +20,9 @@
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]
#[macro_use]
extern crate futures;
extern crate ansi_term;
extern crate cid;
extern crate crypto as rust_crypto;

View File

@ -44,7 +44,8 @@ pub use self::requests::{
TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest,
};
pub use self::signing_queue::{
ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent, DefaultAccount,
ConfirmationsQueue, ConfirmationReceiver, ConfirmationResult,
SigningQueue, QueueEvent, DefaultAccount,
QUEUE_LIMIT as SIGNING_QUEUE_LIMIT,
};
pub use self::signer::SignerService;

View File

@ -14,20 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::mem;
use std::cell::RefCell;
use std::sync::Arc;
use std::collections::BTreeMap;
use jsonrpc_core;
use bigint::prelude::U256;
use util::Address;
use parking_lot::{Mutex, RwLock};
use ethcore::account_provider::DappId;
use v1::helpers::{ConfirmationRequest, ConfirmationPayload};
use v1::helpers::{ConfirmationRequest, ConfirmationPayload, oneshot, errors};
use v1::types::{ConfirmationResponse, H160 as RpcH160, Origin, DappId as RpcDappId};
use jsonrpc_core::Error;
/// Result that can be returned from JSON RPC.
pub type RpcResult = Result<ConfirmationResponse, jsonrpc_core::Error>;
pub type ConfirmationResult = Result<ConfirmationResponse, Error>;
/// Type of default account
pub enum DefaultAccount {
@ -74,8 +72,9 @@ pub const QUEUE_LIMIT: usize = 50;
/// A queue of transactions awaiting to be confirmed and signed.
pub trait SigningQueue: Send + Sync {
/// Add new request to the queue.
/// Returns a `ConfirmationPromise` that can be used to await for resolution of given request.
fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<ConfirmationPromise, QueueAddError>;
/// Returns a `Result` wrapping a `ConfirmationReceiver` together with its unique id in the queue.
/// `ConfirmationReceiver` is a `Future` awaiting resolution of the given request.
fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError>;
/// Removes a request from the queue.
/// Notifies possible token holders that request was rejected.
@ -83,7 +82,7 @@ pub trait SigningQueue: Send + Sync {
/// Removes a request from the queue.
/// Notifies possible token holders that request was confirmed and given hash was assigned.
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest>;
fn request_confirmed(&self, id: U256, result: ConfirmationResult) -> Option<ConfirmationRequest>;
/// Returns a request if it is contained in the queue.
fn peek(&self, id: &U256) -> Option<ConfirmationRequest>;
@ -98,88 +97,20 @@ pub trait SigningQueue: Send + Sync {
fn is_empty(&self) -> bool;
}
#[derive(Debug, Clone, PartialEq)]
/// Result of a pending confirmation request.
pub enum ConfirmationResult {
/// The request has not yet been confirmed nor rejected.
Waiting,
/// The request has been rejected.
Rejected,
/// The request has been confirmed.
Confirmed(RpcResult),
}
type Listener = Box<FnMut(Option<RpcResult>) + Send>;
/// A handle to submitted request.
/// Allows to block and wait for a resolution of that request.
pub struct ConfirmationToken {
result: Arc<Mutex<ConfirmationResult>>,
listeners: Arc<Mutex<Vec<Listener>>>,
struct ConfirmationSender {
sender: oneshot::Sender<ConfirmationResult>,
request: ConfirmationRequest,
}
pub struct ConfirmationPromise {
id: U256,
result: Arc<Mutex<ConfirmationResult>>,
listeners: Arc<Mutex<Vec<Listener>>>,
}
impl ConfirmationToken {
/// Submit solution to all listeners
fn resolve(&self, result: Option<RpcResult>) {
let wrapped = result.clone().map_or(ConfirmationResult::Rejected, |h| ConfirmationResult::Confirmed(h));
{
let mut res = self.result.lock();
*res = wrapped.clone();
}
// Notify listener
let listeners = {
let mut listeners = self.listeners.lock();
mem::replace(&mut *listeners, Vec::new())
};
for mut listener in listeners {
listener(result.clone());
}
}
fn as_promise(&self) -> ConfirmationPromise {
ConfirmationPromise {
id: self.request.id,
result: self.result.clone(),
listeners: self.listeners.clone(),
}
}
}
impl ConfirmationPromise {
/// Get the ID for this request.
pub fn id(&self) -> U256 { self.id }
/// Just get the result, assuming it exists.
pub fn result(&self) -> ConfirmationResult {
self.result.lock().clone()
}
pub fn wait_for_result<F>(self, callback: F) where F: FnOnce(Option<RpcResult>) + Send + 'static {
trace!(target: "own_tx", "Signer: Awaiting confirmation... ({:?}).", self.id);
let _result = self.result.lock();
let mut listeners = self.listeners.lock();
// TODO [todr] Overcoming FnBox unstability
let callback = RefCell::new(Some(callback));
listeners.push(Box::new(move |result| {
let ref mut f = *callback.borrow_mut();
f.take().expect("Callbacks are called only once.")(result)
}));
}
}
/// Receiving end of the Confirmation channel; can be used as a `Future` to await a `ConfirmationRequest`
/// being processed and turned into a `ConfirmationOutcome`.
pub type ConfirmationReceiver = oneshot::Receiver<ConfirmationResult>;
/// Queue for all unconfirmed requests.
#[derive(Default)]
pub struct ConfirmationsQueue {
id: Mutex<U256>,
queue: RwLock<BTreeMap<U256, ConfirmationToken>>,
queue: RwLock<BTreeMap<U256, ConfirmationSender>>,
on_event: RwLock<Vec<Box<Fn(QueueEvent) -> () + Send + Sync>>>,
}
@ -203,23 +134,26 @@ impl ConfirmationsQueue {
}
}
/// Removes requests from this queue and notifies `ConfirmationPromise` holders about the result.
/// Removes requests from this queue and notifies the `ConfirmationReceiver` holder of the result.
/// Notifies also a receiver about that event.
fn remove(&self, id: U256, result: Option<RpcResult>) -> Option<ConfirmationRequest> {
let token = self.queue.write().remove(&id);
fn remove(&self, id: U256, result: Option<ConfirmationResult>) -> Option<ConfirmationRequest> {
let sender = self.queue.write().remove(&id);
if let Some(token) = token {
if let Some(sender) = sender {
// notify receiver about the event
self.notify(result.clone().map_or_else(
|| QueueEvent::RequestRejected(id),
|_| QueueEvent::RequestConfirmed(id)
));
// notify token holders about resolution
token.resolve(result);
// return a result
return Some(token.request.clone());
// notify confirmation receiver about resolution
let result = result.ok_or(errors::request_rejected());
sender.sender.send(result);
Some(sender.request)
} else {
None
}
None
}
}
@ -230,7 +164,7 @@ impl Drop for ConfirmationsQueue {
}
impl SigningQueue for ConfirmationsQueue {
fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<ConfirmationPromise, QueueAddError> {
fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError> {
if self.len() > QUEUE_LIMIT {
return Err(QueueAddError::LimitReached);
}
@ -247,16 +181,17 @@ impl SigningQueue for ConfirmationsQueue {
trace!(target: "own_tx", "Signer: ({:?}) : {:?}", id, request);
let mut queue = self.queue.write();
queue.insert(id, ConfirmationToken {
result: Arc::new(Mutex::new(ConfirmationResult::Waiting)),
listeners: Default::default(),
let (sender, receiver) = oneshot::oneshot::<ConfirmationResult>();
queue.insert(id, ConfirmationSender {
sender,
request: ConfirmationRequest {
id: id,
id,
payload: request,
origin: origin,
origin,
},
});
queue.get(&id).map(|token| token.as_promise()).expect("Token was just inserted.")
(id, receiver)
};
// Notify listeners
self.notify(QueueEvent::NewRequest(id));
@ -264,7 +199,7 @@ impl SigningQueue for ConfirmationsQueue {
}
fn peek(&self, id: &U256) -> Option<ConfirmationRequest> {
self.queue.read().get(id).map(|token| token.request.clone())
self.queue.read().get(id).map(|sender| sender.request.clone())
}
fn request_rejected(&self, id: U256) -> Option<ConfirmationRequest> {
@ -272,14 +207,14 @@ impl SigningQueue for ConfirmationsQueue {
self.remove(id, None)
}
fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest> {
fn request_confirmed(&self, id: U256, result: ConfirmationResult) -> Option<ConfirmationRequest> {
debug!(target: "own_tx", "Signer: Transaction confirmed ({:?}).", id);
self.remove(id, Some(result))
}
fn requests(&self) -> Vec<ConfirmationRequest> {
let queue = self.queue.read();
queue.values().map(|token| token.request.clone()).collect()
queue.values().map(|sender| sender.request.clone()).collect()
}
fn len(&self) -> usize {
@ -296,13 +231,14 @@ impl SigningQueue for ConfirmationsQueue {
#[cfg(test)]
mod test {
use std::time::Duration;
use std::thread;
use std::sync::{mpsc, Arc};
use std::sync::Arc;
use bigint::prelude::U256;
use util::Address;
use parking_lot::Mutex;
use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload};
use jsonrpc_core::futures::Future;
use v1::helpers::{
SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload,
};
use v1::types::ConfirmationResponse;
fn request() -> ConfirmationPayload {
@ -326,25 +262,12 @@ mod test {
let request = request();
// when
let q = queue.clone();
let handle = thread::spawn(move || {
let v = q.add_request(request, Default::default()).unwrap();
let (tx, rx) = mpsc::channel();
v.wait_for_result(move |res| {
tx.send(res).unwrap();
});
rx.recv().unwrap().expect("Should return hash")
});
let id = U256::from(1);
while queue.peek(&id).is_none() {
// Just wait for the other thread to start
thread::sleep(Duration::from_millis(100));
}
let (id, future) = queue.add_request(request, Default::default()).unwrap();
queue.request_confirmed(id, Ok(ConfirmationResponse::SendTransaction(1.into())));
// then
assert_eq!(handle.join().expect("Thread should finish nicely"), Ok(ConfirmationResponse::SendTransaction(1.into())));
let confirmation = future.wait().unwrap();
assert_eq!(confirmation, Ok(ConfirmationResponse::SendTransaction(1.into())));
}
#[test]
@ -359,7 +282,7 @@ mod test {
queue.on_event(move |notification| {
r.lock().push(notification);
});
queue.add_request(request, Default::default()).unwrap();
let _future = queue.add_request(request, Default::default()).unwrap();
queue.finish();
// then
@ -376,7 +299,7 @@ mod test {
let request = request();
// when
queue.add_request(request.clone(), Default::default()).unwrap();
let _future = queue.add_request(request.clone(), Default::default()).unwrap();
let all = queue.requests();
// then
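The heart of this rewrite is visible above: `ConfirmationToken`/`ConfirmationPromise` and their callback listeners are replaced by one oneshot channel per request, so the waiting side becomes an ordinary `Future`. The in-tree `v1::helpers::oneshot` module is used here but not shown in this diff; the sketch below uses `futures::sync::oneshot` from the futures 0.1 crate to illustrate the same send-once, await-once contract.

extern crate futures;

use futures::Future;
use futures::sync::oneshot;

fn main() {
    // queue side: the sender is kept next to the request (compare ConfirmationSender)
    let (tx, rx) = oneshot::channel::<Result<&'static str, &'static str>>();

    // request_confirmed()/request_rejected() reduce to a single send of the outcome
    tx.send(Ok("0x01")).expect("receiver still alive");

    // RPC side: the receiver is a Future; wait() blocks until the outcome arrives
    let outcome = rx.wait().expect("sender was not dropped");
    assert_eq!(outcome, Ok("0x01"));
}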

View File

@ -24,12 +24,12 @@ use parking_lot::Mutex;
use ethcore::account_provider::AccountProvider;
use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::{future, Future, Poll, Async};
use jsonrpc_core::futures::future::Either;
use v1::helpers::{
errors, oneshot,
DefaultAccount,
SIGNING_QUEUE_LIMIT, SigningQueue, ConfirmationPromise, ConfirmationResult, SignerService,
errors, DefaultAccount, SignerService, SigningQueue,
ConfirmationReceiver as RpcConfirmationReceiver,
ConfirmationResult as RpcConfirmationResult,
};
use v1::helpers::dispatch::{self, Dispatcher};
use v1::helpers::accounts::unwrap_provider;
@ -45,61 +45,67 @@ use v1::types::{
Origin,
};
use parity_reactor::Remote;
/// After 60s entries that are not queried with `check_request` will get garbage collected.
const MAX_PENDING_DURATION_SEC: u32 = 60;
/// Max number of total requests pending and completed, before we start garbage collecting them.
const MAX_TOTAL_REQUESTS: usize = SIGNING_QUEUE_LIMIT;
#[must_use = "futures do nothing unless polled"]
enum DispatchResult {
Promise(ConfirmationPromise),
Future(U256, RpcConfirmationReceiver),
Value(RpcConfirmationResponse),
}
impl Future for DispatchResult {
type Item = RpcConfirmationResponse;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match *self {
DispatchResult::Value(ref response) => Ok(Async::Ready(response.clone())),
DispatchResult::Future(_uid, ref mut future) => try_ready!(future.poll()).map(Async::Ready),
}
}
}
fn schedule(remote: Remote,
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
id: U256,
future: RpcConfirmationReceiver) {
{
let mut confirmations = confirmations.lock();
confirmations.insert(id.clone(), None);
}
let future = future.then(move |result| {
let mut confirmations = confirmations.lock();
confirmations.prune();
let result = result.and_then(|response| response);
confirmations.insert(id, Some(result));
Ok(())
});
remote.spawn(future);
}
/// Implementation of functions that require signing when no trusted signer is used.
pub struct SigningQueueClient<D> {
signer: Arc<SignerService>,
accounts: Option<Arc<AccountProvider>>,
dispatcher: D,
pending: Arc<Mutex<TransientHashMap<U256, ConfirmationPromise>>>,
}
fn handle_dispatch<OnResponse>(res: Result<DispatchResult, Error>, on_response: OnResponse)
where OnResponse: FnOnce(Result<RpcConfirmationResponse, Error>) + Send + 'static
{
match res {
Ok(DispatchResult::Value(result)) => on_response(Ok(result)),
Ok(DispatchResult::Promise(promise)) => {
promise.wait_for_result(move |result| {
on_response(result.unwrap_or_else(|| Err(errors::request_rejected())))
})
},
Err(e) => on_response(Err(e)),
}
}
fn collect_garbage(map: &mut TransientHashMap<U256, ConfirmationPromise>) {
map.prune();
if map.len() > MAX_TOTAL_REQUESTS {
// Remove all non-waiting entries.
let non_waiting: Vec<_> = map
.iter()
.filter(|&(_, val)| val.result() != ConfirmationResult::Waiting)
.map(|(key, _)| *key)
.collect();
for k in non_waiting {
map.remove(&k);
}
}
remote: Remote,
// None here means that the request hasn't yet been confirmed
confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
}
impl<D: Dispatcher + 'static> SigningQueueClient<D> {
/// Creates a new signing queue client given shared signing queue.
pub fn new(signer: &Arc<SignerService>, dispatcher: D, accounts: &Option<Arc<AccountProvider>>) -> Self {
pub fn new(signer: &Arc<SignerService>, dispatcher: D, remote: Remote, accounts: &Option<Arc<AccountProvider>>) -> Self {
SigningQueueClient {
signer: signer.clone(),
accounts: accounts.clone(),
dispatcher: dispatcher,
pending: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
dispatcher,
remote,
confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
}
}
@ -126,7 +132,7 @@ impl<D: Dispatcher + 'static> SigningQueueClient<D> {
} else {
Either::B(future::done(
signer.add_request(payload, origin)
.map(DispatchResult::Promise)
.map(|(id, future)| DispatchResult::Future(id, future))
.map_err(|_| errors::request_rejected_limit())
))
}
@ -144,35 +150,31 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
}
fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
let pending = self.pending.clone();
let remote = self.remote.clone();
let confirmations = self.confirmations.clone();
Box::new(self.dispatch(
RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()),
DefaultAccount::Provided(address.into()),
meta.origin
).map(move |result| match result {
DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Promise(promise) => {
let id = promise.id();
let mut pending = pending.lock();
collect_garbage(&mut pending);
pending.insert(id, promise);
DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future);
RpcEither::Either(id.into())
},
}))
}
fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
let pending = self.pending.clone();
Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin)
.map(move |result| match result {
DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Promise(promise) => {
let id = promise.id();
let mut pending = pending.lock();
collect_garbage(&mut pending);
pending.insert(id, promise);
let remote = self.remote.clone();
let confirmations = self.confirmations.clone();
Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin)
.map(|result| match result {
DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Future(id, future) => {
schedule(remote, confirmations, id, future);
RpcEither::Either(id.into())
},
}))
@ -180,13 +182,10 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
fn check_request(&self, id: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> {
let id: U256 = id.into();
match self.pending.lock().get(&id) {
Some(ref promise) => match promise.result() {
ConfirmationResult::Waiting => Ok(None),
ConfirmationResult::Rejected => Err(errors::request_rejected()),
ConfirmationResult::Confirmed(rpc_response) => rpc_response.map(Some),
},
_ => Err(errors::request_not_found()),
match self.confirmations.lock().get(&id) {
None => Err(errors::request_not_found()), // Request info has been dropped, or was never there
Some(&None) => Ok(None), // No confirmation yet, request is known, confirmation is pending
Some(&Some(ref confirmation)) => confirmation.clone().map(Some), // Confirmation is there
}
}
@ -197,20 +196,12 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
meta.origin,
);
let (ready, p) = oneshot::oneshot();
// when dispatch is complete
Box::new(res.then(move |res| {
// register callback via the oneshot sender.
handle_dispatch(res, move |response| {
match response {
Ok(RpcConfirmationResponse::Decrypt(data)) => ready.send(Ok(data)),
Err(e) => ready.send(Err(e)),
e => ready.send(Err(errors::internal("Unexpected result.", e))),
}
});
p
// when dispatch is complete, wait for the result and unpack the expected response type
Box::new(res.flatten().and_then(move |response| {
match response {
RpcConfirmationResponse::Decrypt(data) => Ok(data),
e => Err(errors::internal("Unexpected result.", e)),
}
}))
}
}
@ -225,18 +216,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
meta.origin,
);
let (ready, p) = oneshot::oneshot();
Box::new(res.then(move |res| {
handle_dispatch(res, move |response| {
match response {
Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)),
Err(e) => ready.send(Err(e)),
e => ready.send(Err(errors::internal("Unexpected result.", e))),
}
});
p
Box::new(res.flatten().and_then(move |response| {
match response {
RpcConfirmationResponse::Signature(sig) => Ok(sig),
e => Err(errors::internal("Unexpected result.", e)),
}
}))
}
@ -247,18 +231,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
meta.origin,
);
let (ready, p) = oneshot::oneshot();
Box::new(res.then(move |res| {
handle_dispatch(res, move |response| {
match response {
Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)),
Err(e) => ready.send(Err(e)),
e => ready.send(Err(errors::internal("Unexpected result.", e))),
}
});
p
Box::new(res.flatten().and_then(move |response| {
match response {
RpcConfirmationResponse::SendTransaction(hash) => Ok(hash),
e => Err(errors::internal("Unexpected result.", e)),
}
}))
}
@ -269,18 +246,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
meta.origin,
);
let (ready, p) = oneshot::oneshot();
Box::new(res.then(move |res| {
handle_dispatch(res, move |response| {
match response {
Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)),
Err(e) => ready.send(Err(e)),
e => ready.send(Err(errors::internal("Unexpected result.", e))),
}
});
p
Box::new(res.flatten().and_then(move |response| {
match response {
RpcConfirmationResponse::SignTransaction(tx) => Ok(tx),
e => Err(errors::internal("Unexpected result.", e)),
}
}))
}
}
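`check_request` now answers three different situations from a single map lookup: an unknown (or already pruned) id, a known request that is still pending, and a resolved one. A standalone sketch of that lookup, with a plain `HashMap` standing in for the `TransientHashMap` of confirmations and string placeholders for the RPC response and error types:

use std::collections::HashMap;

type Outcome = Result<&'static str, &'static str>;

fn check(confirmations: &HashMap<u64, Option<Outcome>>, id: u64) -> Result<Option<&'static str>, &'static str> {
    match confirmations.get(&id) {
        // id was never queued, or was garbage collected after MAX_PENDING_DURATION_SEC
        None => Err("request not found"),
        // request is known, but nobody has confirmed or rejected it yet
        Some(&None) => Ok(None),
        // resolved: propagate the stored outcome
        Some(&Some(ref outcome)) => outcome.clone().map(Some),
    }
}

fn main() {
    let mut confirmations = HashMap::new();
    confirmations.insert(1u64, None);
    confirmations.insert(2u64, Some(Ok("0xdead")));
    assert_eq!(check(&confirmations, 1), Ok(None));
    assert_eq!(check(&confirmations, 2), Ok(Some("0xdead")));
    assert_eq!(check(&confirmations, 3), Err("request not found"));
}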

View File

@ -75,9 +75,9 @@ impl SyncProvider for TestSyncProvider {
vec![
PeerInfo {
id: Some("node1".to_owned()),
client_version: "Parity/1".to_owned(),
client_version: "Parity/1".to_owned(),
capabilities: vec!["eth/62".to_owned(), "eth/63".to_owned()],
remote_address: "127.0.0.1:7777".to_owned(),
remote_address: "127.0.0.1:7777".to_owned(),
local_address: "127.0.0.1:8888".to_owned(),
eth_info: Some(EthProtocolInfo {
version: 62,
@ -88,9 +88,9 @@ impl SyncProvider for TestSyncProvider {
},
PeerInfo {
id: None,
client_version: "Parity/2".to_owned(),
client_version: "Parity/2".to_owned(),
capabilities: vec!["eth/63".to_owned(), "eth/64".to_owned()],
remote_address: "Handshake".to_owned(),
remote_address: "Handshake".to_owned(),
local_address: "127.0.0.1:3333".to_owned(),
eth_info: Some(EthProtocolInfo {
version: 64,

View File

@ -80,7 +80,7 @@ fn signer_tester() -> SignerTester {
fn should_return_list_of_items_to_confirm() {
// given
let tester = signer_tester();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _send_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: Address::from(1),
used_default_from: false,
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@ -91,7 +91,7 @@ fn should_return_list_of_items_to_confirm() {
nonce: None,
condition: None,
}), Origin::Dapps("http://parity.io".into())).unwrap();
tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap();
let _sign_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap();
// when
let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#;
@ -111,7 +111,7 @@ fn should_return_list_of_items_to_confirm() {
fn should_reject_transaction_from_queue_without_dispatching() {
// given
let tester = signer_tester();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: Address::from(1),
used_default_from: false,
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@ -138,7 +138,7 @@ fn should_reject_transaction_from_queue_without_dispatching() {
fn should_not_remove_transaction_if_password_is_invalid() {
// given
let tester = signer_tester();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: Address::from(1),
used_default_from: false,
to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@ -164,7 +164,7 @@ fn should_not_remove_transaction_if_password_is_invalid() {
fn should_not_remove_sign_if_password_is_invalid() {
// given
let tester = signer_tester();
tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap();
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap();
assert_eq!(tester.signer.requests().len(), 1);
// when
@ -182,7 +182,7 @@ fn should_confirm_transaction_and_dispatch() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: address,
used_default_from: false,
to: Some(recipient),
@ -228,7 +228,7 @@ fn should_alter_the_sender_and_nonce() {
//// given
let tester = signer_tester();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: 0.into(),
used_default_from: false,
to: Some(recipient),
@ -278,7 +278,7 @@ fn should_confirm_transaction_with_token() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: address,
used_default_from: false,
to: Some(recipient),
@ -327,7 +327,7 @@ fn should_confirm_transaction_with_rlp() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: address,
used_default_from: false,
to: Some(recipient),
@ -374,7 +374,7 @@ fn should_return_error_when_sender_does_not_match() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: Address::default(),
used_default_from: false,
to: Some(recipient),
@ -421,7 +421,7 @@ fn should_confirm_sign_transaction_with_rlp() {
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SignTransaction(FilledTransactionRequest {
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SignTransaction(FilledTransactionRequest {
from: address,
used_default_from: false,
to: Some(recipient),
@ -485,7 +485,7 @@ fn should_confirm_data_sign_with_signature() {
// given
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
tester.signer.add_request(ConfirmationPayload::EthSignMessage(
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(
address,
vec![1, 2, 3, 4].into(),
), Origin::Unknown).unwrap();
@ -515,7 +515,7 @@ fn should_confirm_decrypt_with_phrase() {
// given
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
tester.signer.add_request(ConfirmationPayload::Decrypt(
let _confirmation_future = tester.signer.add_request(ConfirmationPayload::Decrypt(
address,
vec![1, 2, 3, 4].into(),
), Origin::Unknown).unwrap();

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
@ -39,6 +40,8 @@ use ethcore::transaction::{Transaction, Action, SignedTransaction};
use ethstore::ethkey::{Generator, Random};
use serde_json;
use parity_reactor::Remote;
struct SigningTester {
pub signer: Arc<SignerService>,
pub client: Arc<TestBlockChainClient>,
@ -58,9 +61,11 @@ impl Default for SigningTester {
let dispatcher = FullDispatcher::new(client.clone(), miner.clone());
let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), &opt_accounts);
let remote = Remote::new_thread_per_future();
let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), remote.clone(), &opt_accounts);
io.extend_with(EthSigning::to_delegate(rpc));
let rpc = SigningQueueClient::new(&signer, dispatcher, &opt_accounts);
let rpc = SigningQueueClient::new(&signer, dispatcher, remote, &opt_accounts);
io.extend_with(ParitySigning::to_delegate(rpc));
SigningTester {
@ -184,6 +189,9 @@ fn should_check_status_of_request_when_its_resolved() {
tester.io.handle_request_sync(&request).expect("Sent");
tester.signer.request_confirmed(1.into(), Ok(ConfirmationResponse::Signature(1.into())));
// This is not ideal, but we need to give futures some time to be executed, and they need to run in a separate thread
thread::sleep(Duration::from_millis(20));
// when
let request = r#"{
"jsonrpc": "2.0",

View File

@ -47,13 +47,13 @@ impl From<helpers::ConfirmationRequest> for ConfirmationRequest {
}
impl fmt::Display for ConfirmationRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{}: {} coming from {}", self.id, self.payload, self.origin)
}
}
impl fmt::Display for ConfirmationPayload {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ConfirmationPayload::SendTransaction(ref transaction) => write!(f, "{}", transaction),
ConfirmationPayload::SignTransaction(ref transaction) => write!(f, "(Sign only) {}", transaction),
@ -83,7 +83,7 @@ impl From<(H160, Bytes)> for SignRequest {
}
impl fmt::Display for SignRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"sign 0x{} with {}",
@ -113,7 +113,7 @@ impl From<(H160, Bytes)> for DecryptRequest {
}
impl fmt::Display for DecryptRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"decrypt data with {}",

View File

@ -62,7 +62,7 @@ pub fn format_ether(i: U256) -> String {
}
impl fmt::Display for TransactionRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let eth = self.value.unwrap_or(U256::from(0));
match self.to {
Some(ref to) => write!(

View File

@ -32,6 +32,7 @@ ethcore-bytes = { path = "bytes" }
memorydb = { path = "memorydb" }
util-error = { path = "error" }
kvdb = { path = "kvdb" }
journaldb = { path = "journaldb" }
[dev-dependencies]
kvdb-memorydb = { path = "kvdb-memorydb" }

util/journaldb/Cargo.toml Normal file
View File

@ -0,0 +1,23 @@
[package]
name = "journaldb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions"
license = "GPL3"
[dependencies]
ethcore-bigint = { path = "../bigint", features = ["heapsizeof"] }
ethcore-bytes = { path = "../bytes" }
hashdb = { path = "../hashdb" }
heapsize = "0.4"
kvdb = { path = "../kvdb" }
log = "0.3"
memorydb = { path = "../memorydb" }
parking_lot = "0.4"
rlp = { path = "../rlp" }
util-error = { path = "../error" }
[dev-dependencies]
ethcore-logger = { path = "../../logger" }
hash = { path = "../hash" }
kvdb-memorydb = { path = "../kvdb-memorydb" }

View File

@ -21,9 +21,9 @@ use std::collections::hash_map::Entry;
use std::sync::Arc;
use rlp::*;
use hashdb::*;
use super::super::memorydb::*;
use super::memorydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
use traits::JournalDB;
use kvdb::{KeyValueDB, DBTransaction};
use bigint::hash::H256;
use error::{BaseDataError, UtilError};
@ -202,8 +202,7 @@ mod tests {
use keccak::keccak;
use hashdb::{HashDB, DBValue};
use super::*;
use journaldb::traits::JournalDB;
use kvdb_memorydb;
use {kvdb_memorydb, JournalDB};
#[test]
fn insert_same_in_fork() {

View File

@ -16,6 +16,26 @@
//! `JournalDB` interface and implementation.
extern crate heapsize;
#[macro_use]
extern crate log;
extern crate ethcore_bigint as bigint;
extern crate ethcore_bytes as bytes;
extern crate hashdb;
extern crate kvdb;
extern crate memorydb;
extern crate parking_lot;
extern crate rlp;
extern crate util_error as error;
#[cfg(test)]
extern crate ethcore_logger;
#[cfg(test)]
extern crate hash as keccak;
#[cfg(test)]
extern crate kvdb_memorydb;
use std::{fmt, str};
use std::sync::Arc;
@ -26,6 +46,8 @@ mod earlymergedb;
mod overlayrecentdb;
mod refcounteddb;
pub mod overlaydb;
/// Export the `JournalDB` trait.
pub use self::traits::JournalDB;

View File

@ -459,8 +459,7 @@ mod tests {
use super::*;
use hashdb::{HashDB, DBValue};
use ethcore_logger::init_log;
use journaldb::JournalDB;
use kvdb_memorydb;
use {kvdb_memorydb, JournalDB};
fn new_db() -> OverlayRecentDB {
let backing = Arc::new(kvdb_memorydb::create(0));

View File

@ -210,9 +210,8 @@ mod tests {
use keccak::keccak;
use hashdb::{HashDB, DBValue};
use kvdb_memorydb;
use super::*;
use super::super::traits::JournalDB;
use {JournalDB, kvdb_memorydb};
fn new_db() -> RefCountedDB {
let backing = Arc::new(kvdb_memorydb::create(0));

View File

@ -112,18 +112,12 @@ extern crate util_error as error;
#[cfg(test)]
extern crate kvdb_memorydb;
#[macro_use]
extern crate log as rlog;
pub mod misc;
pub mod overlaydb;
pub mod journaldb;
pub use misc::*;
pub use hashdb::*;
pub use memorydb::MemoryDB;
pub use overlaydb::*;
pub use journaldb::JournalDB;
/// 160-bit integer representing account address
pub type Address = bigint::hash::H160;