Merge branch 'master' into td-ui-2
commit 195305ce2e

Cargo.lock (generated): 27 changed lines
@@ -539,6 +539,7 @@ dependencies = [
 "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
 "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
+"journaldb 0.1.0",
 "kvdb 0.1.0",
 "kvdb-memorydb 0.1.0",
 "kvdb-rocksdb 0.1.0",
@@ -772,6 +773,7 @@ dependencies = [
 "hash 0.1.0",
 "hashdb 0.1.0",
 "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"journaldb 0.1.0",
 "kvdb 0.1.0",
 "kvdb-memorydb 0.1.0",
 "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1252,6 +1254,25 @@ name = "itoa"
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "journaldb"
+version = "0.1.0"
+dependencies = [
+"ethcore-bigint 0.1.3",
+"ethcore-bytes 0.1.0",
+"ethcore-logger 1.9.0",
+"hash 0.1.0",
+"hashdb 0.1.0",
+"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"kvdb 0.1.0",
+"kvdb-memorydb 0.1.0",
+"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+"memorydb 0.1.0",
+"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+"rlp 0.2.0",
+"util-error 0.1.0",
+]
+
 [[package]]
 name = "jsonrpc-core"
 version = "8.0.0"
@@ -1912,7 +1933,7 @@ dependencies = [
 
 [[package]]
 name = "parity"
-version = "1.8.0"
+version = "1.9.0"
 dependencies = [
 "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1941,6 +1962,7 @@ dependencies = [
 "hash 0.1.0",
 "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"journaldb 0.1.0",
 "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
 "kvdb 0.1.0",
 "kvdb-rocksdb 0.1.0",
@@ -2137,6 +2159,7 @@ dependencies = [
 "ethstore 0.1.0",
 "ethsync 1.9.0",
 "fetch 0.1.0",
+"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
 "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "hardware-wallet 1.9.0",
 "hash 0.1.0",
@@ -2242,7 +2265,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#4b77a23c3e55aed45725f43cd2a499676375b995"
+source = "git+https://github.com/paritytech/js-precompiled.git#fe0b4dbdfe6e7ebebb247d565d937fd0a0feca5f"
 dependencies = [
 "parity-dapps-glue 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1,7 +1,7 @@
 [package]
 description = "Parity Ethereum client"
 name = "parity"
-version = "1.8.0"
+version = "1.9.0"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 build = "build.rs"
@@ -62,6 +62,7 @@ hash = { path = "util/hash" }
 migration = { path = "util/migration" }
 kvdb = { path = "util/kvdb" }
 kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
+journaldb = { path = "util/journaldb" }
 
 parity-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.103", optional = true}
@@ -13,7 +13,7 @@ futures = "0.1"
 futures-cpupool = "0.1"
 linked-hash-map = "0.5"
 log = "0.3"
-parity-dapps-glue = "1.8"
+parity-dapps-glue = "1.9"
 parking_lot = "0.4"
 mime_guess = "2.0.0-alpha.2"
 rand = "0.3"
@@ -71,6 +71,7 @@ hash = { path = "../util/hash" }
 triehash = { path = "../util/triehash" }
 semantic_version = { path = "../util/semantic_version" }
 unexpected = { path = "../util/unexpected" }
+journaldb = { path = "../util/journaldb" }
 
 [dev-dependencies]
 native-contracts = { path = "native_contracts", features = ["test_contracts"] }
@@ -1 +1 @@
-Subproject commit 9b722a014a2b2c9ea6eac456fe01a5c3dd1042a8
+Subproject commit b6011c3fb567d7178915574de0a8d4b5331fe725
@@ -26,7 +26,8 @@ use itertools::Itertools;
 use hash::keccak;
 use timer::PerfTimer;
 use bytes::Bytes;
-use util::{Address, journaldb, DBValue};
+use util::{Address, DBValue};
+use journaldb;
 use util_error::UtilError;
 use trie::{TrieSpec, TrieFactory, Trie};
 use kvdb::{KeyValueDB, DBTransaction};
@@ -20,7 +20,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
 
 use mode::Mode as IpcMode;
 use verification::{VerifierType, QueueConfig};
-use util::journaldb;
+use journaldb;
 use kvdb_rocksdb::CompactionProfile;
 
 pub use std::time::Duration;
@@ -20,7 +20,7 @@ use std::fmt;
 use std::sync::Arc;
 use bigint::prelude::U256;
 use bigint::hash::H256;
-use util::journaldb;
+use journaldb;
 use {trie, kvdb_memorydb, bytes};
 use kvdb::{self, KeyValueDB};
 use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
@@ -26,7 +26,8 @@ use hash::keccak;
 use bigint::prelude::U256;
 use bigint::hash::H256;
 use parking_lot::RwLock;
-use util::*;
+use journaldb;
+use util::{Address, DBValue};
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use bytes::Bytes;
 use rlp::*;
@@ -131,6 +131,7 @@ extern crate vm;
 extern crate wasm;
 extern crate ethcore_util as util;
 extern crate memory_cache;
+extern crate journaldb;
 
 #[macro_use]
 extern crate macros;
@@ -23,7 +23,7 @@ use trie::TrieDB;
 use views::HeaderView;
 use bloom_journal::Bloom;
 use migration::{Error, Migration, Progress, Batch, Config, ErrorKind};
-use util::journaldb;
+use journaldb;
 use bigint::hash::H256;
 use trie::Trie;
 use kvdb::{DBTransaction, ResultExt};
@@ -36,7 +36,7 @@ use util::{HashDB, DBValue};
 use snappy;
 use bytes::Bytes;
 use parking_lot::Mutex;
-use util::journaldb::{self, Algorithm, JournalDB};
+use journaldb::{self, Algorithm, JournalDB};
 use kvdb::KeyValueDB;
 use trie::{TrieDB, TrieDBMut, Trie, TrieMut};
 use rlp::{RlpStream, UntrustedRlp};
@@ -39,7 +39,7 @@ use bigint::hash::H256;
 use parking_lot::{Mutex, RwLock, RwLockReadGuard};
 use util_error::UtilError;
 use bytes::Bytes;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use snappy;
 
@@ -625,7 +625,7 @@ mod tests {
 use io::{IoService};
 use devtools::RandomTempPath;
 use tests::helpers::get_test_spec;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use error::Error;
 use snapshot::{ManifestData, RestorationStatus, SnapshotService};
 use super::*;
@@ -35,7 +35,7 @@ use util::DBValue;
 use kvdb::KeyValueDB;
 use bigint::hash::H256;
 use hashdb::HashDB;
-use util::journaldb;
+use journaldb;
 use trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};
 use trie::{TrieDB, TrieDBMut, Trie};
 
@@ -69,7 +69,7 @@ fn restored_is_equivalent() {
 engine: spec.engine.clone(),
 genesis_block: spec.genesis_block(),
 db_config: db_config,
-pruning: ::util::journaldb::Algorithm::Archive,
+pruning: ::journaldb::Algorithm::Archive,
 channel: IoChannel::disconnected(),
 snapshot_root: path,
 db_restore: client2.clone(),
@@ -112,7 +112,7 @@ fn guards_delete_folders() {
 engine: spec.engine.clone(),
 genesis_block: spec.genesis_block(),
 db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
-pruning: ::util::journaldb::Algorithm::Archive,
+pruning: ::journaldb::Algorithm::Archive,
 channel: IoChannel::disconnected(),
 snapshot_root: path.clone(),
 db_restore: Arc::new(NoopDBRestore),
@@ -26,7 +26,7 @@ use error::Error;
 
 use rand::{XorShiftRng, SeedableRng};
 use bigint::hash::H256;
-use util::journaldb::{self, Algorithm};
+use journaldb::{self, Algorithm};
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use memorydb::MemoryDB;
 use parking_lot::Mutex;
@@ -670,7 +670,7 @@ impl Spec {
 /// constructor.
 pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
 use transaction::{Action, Transaction};
-use util::journaldb;
+use journaldb;
 use kvdb_memorydb;
 
 let genesis = self.genesis_header();
@@ -18,7 +18,7 @@ use std::collections::{VecDeque, HashSet};
 use std::sync::Arc;
 use lru_cache::LruCache;
 use memory_cache::MemoryLruCache;
-use util::journaldb::JournalDB;
+use journaldb::JournalDB;
 use kvdb::{KeyValueDB, DBTransaction};
 use bigint::hash::H256;
 use hashdb::HashDB;
@@ -36,7 +36,6 @@ use state_db::StateDB;
 use state::*;
 use std::sync::Arc;
 use transaction::{Action, Transaction, SignedTransaction};
-use util::*;
 use views::BlockView;
 
 // TODO: move everything over to get_null_spec.
@@ -282,7 +281,7 @@ pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::Sta
 
 pub fn get_temp_state_db() -> StateDB {
 let db = new_db();
-let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
+let journal_db = ::journaldb::new(db, ::journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
 StateDB::new(journal_db, 5 * 1024 * 1024)
 }
 
@@ -1 +1 @@
-// test script 11
+// test script 12
@@ -17,7 +17,7 @@
 use std::fs;
 use std::path::{PathBuf, Path};
 use bigint::hash::{H64, H256};
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use helpers::{replace_home, replace_home_and_local};
 use app_dirs::{AppInfo, get_app_root, AppDataType};
 
@@ -22,7 +22,7 @@ use bigint::prelude::U256;
 use bigint::hash::clean_0x;
 use util::Address;
 use kvdb_rocksdb::CompactionProfile;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
 use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
 use cache::CacheConfig;
@@ -76,6 +76,7 @@ extern crate path;
 extern crate rpc_cli;
 extern crate node_filter;
 extern crate hash;
+extern crate journaldb;
 
 #[macro_use]
 extern crate log as rlog;
@@ -20,7 +20,7 @@ use std::io::{Read, Write, Error as IoError, ErrorKind};
 use std::path::{Path, PathBuf};
 use std::fmt::{Display, Formatter, Error as FmtError};
 use std::sync::Arc;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use migr::{self, Manager as MigrationManager, Config as MigrationConfig, Migration};
 use kvdb;
 use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
@@ -282,7 +282,6 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
 mod legacy {
 use super::*;
 use std::path::{Path, PathBuf};
-use util::journaldb::Algorithm;
 use migr::{Manager as MigrationManager};
 use kvdb_rocksdb::CompactionProfile;
 use ethcore::migrations;
@@ -18,7 +18,7 @@ use std::{str, fs, fmt};
 use std::time::Duration;
 use bigint::prelude::U256;
 use util::{Address, version_data};
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use ethcore::spec::{Spec, SpecParams};
 use ethcore::ethereum;
 use ethcore::client::Mode;
@@ -326,7 +326,7 @@ pub fn mode_switch_to_bool(switch: Option<Mode>, user_defaults: &UserDefaults) -
 
 #[cfg(test)]
 mod tests {
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use user_defaults::UserDefaults;
 use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool};
 
@@ -244,7 +244,7 @@ impl FullDependencies {
 let deps = &$deps;
 let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone());
 if deps.signer_service.is_enabled() {
-$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store)))
+$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &deps.secret_store)))
 } else {
 $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher)))
 }
@@ -445,7 +445,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
 let secret_store = Some(deps.secret_store.clone());
 if deps.signer_service.is_enabled() {
 $handler.extend_with($namespace::to_delegate(
-SigningQueueClient::new(&deps.signer_service, dispatcher, &secret_store)
+SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &secret_store)
 ))
 } else {
 $handler.extend_with(
@@ -42,7 +42,7 @@ use ansi_term::Colour;
 use util::version;
 use parking_lot::{Condvar, Mutex};
 use node_filter::NodeFilter;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 
 use params::{
 SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
@@ -25,7 +25,7 @@ use std::io::{Read, Write};
 use std::path::{PathBuf, Path};
 use dir::{DatabaseDirectories, default_data_path};
 use helpers::replace_home;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 
 #[cfg_attr(feature="dev", allow(enum_variant_names))]
 #[derive(Debug)]
@@ -26,7 +26,7 @@ use serde::de::value::MapAccessDeserializer;
 use serde_json::Value;
 use serde_json::de::from_reader;
 use serde_json::ser::to_string;
-use util::journaldb::Algorithm;
+use journaldb::Algorithm;
 use ethcore::client::Mode;
 
 pub struct UserDefaults {
@@ -10,6 +10,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 ansi_term = "0.9"
 cid = "0.2"
+futures = "0.1.6"
 futures-cpupool = "0.1"
 log = "0.3"
 multihash ="0.6"
@@ -20,6 +20,9 @@
 #![cfg_attr(feature="dev", feature(plugin))]
 #![cfg_attr(feature="dev", plugin(clippy))]
 
+#[macro_use]
+extern crate futures;
+
 extern crate ansi_term;
 extern crate cid;
 extern crate crypto as rust_crypto;
@@ -44,7 +44,8 @@ pub use self::requests::{
 TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest,
 };
 pub use self::signing_queue::{
-ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent, DefaultAccount,
+ConfirmationsQueue, ConfirmationReceiver, ConfirmationResult,
+SigningQueue, QueueEvent, DefaultAccount,
 QUEUE_LIMIT as SIGNING_QUEUE_LIMIT,
 };
 pub use self::signer::SignerService;
@@ -14,20 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::mem;
-use std::cell::RefCell;
-use std::sync::Arc;
 use std::collections::BTreeMap;
-use jsonrpc_core;
 use bigint::prelude::U256;
 use util::Address;
 use parking_lot::{Mutex, RwLock};
 use ethcore::account_provider::DappId;
-use v1::helpers::{ConfirmationRequest, ConfirmationPayload};
+use v1::helpers::{ConfirmationRequest, ConfirmationPayload, oneshot, errors};
 use v1::types::{ConfirmationResponse, H160 as RpcH160, Origin, DappId as RpcDappId};
 
+use jsonrpc_core::Error;
+
 /// Result that can be returned from JSON RPC.
-pub type RpcResult = Result<ConfirmationResponse, jsonrpc_core::Error>;
+pub type ConfirmationResult = Result<ConfirmationResponse, Error>;
 
 /// Type of default account
 pub enum DefaultAccount {
@@ -74,8 +72,9 @@ pub const QUEUE_LIMIT: usize = 50;
 /// A queue of transactions awaiting to be confirmed and signed.
 pub trait SigningQueue: Send + Sync {
 /// Add new request to the queue.
-/// Returns a `ConfirmationPromise` that can be used to await for resolution of given request.
-fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<ConfirmationPromise, QueueAddError>;
+/// Returns a `Result` wrapping `ConfirmationReceiver` together with it's unique id in the queue.
+/// `ConfirmationReceiver` is a `Future` awaiting for resolution of the given request.
+fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError>;
 
 /// Removes a request from the queue.
 /// Notifies possible token holders that request was rejected.
@@ -83,7 +82,7 @@ pub trait SigningQueue: Send + Sync {
 
 /// Removes a request from the queue.
 /// Notifies possible token holders that request was confirmed and given hash was assigned.
-fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest>;
+fn request_confirmed(&self, id: U256, result: ConfirmationResult) -> Option<ConfirmationRequest>;
 
 /// Returns a request if it is contained in the queue.
 fn peek(&self, id: &U256) -> Option<ConfirmationRequest>;
@@ -98,88 +97,20 @@ pub trait SigningQueue: Send + Sync {
 fn is_empty(&self) -> bool;
 }
 
-#[derive(Debug, Clone, PartialEq)]
-/// Result of a pending confirmation request.
-pub enum ConfirmationResult {
-/// The request has not yet been confirmed nor rejected.
-Waiting,
-/// The request has been rejected.
-Rejected,
-/// The request has been confirmed.
-Confirmed(RpcResult),
-}
-
-type Listener = Box<FnMut(Option<RpcResult>) + Send>;
-
-/// A handle to submitted request.
-/// Allows to block and wait for a resolution of that request.
-pub struct ConfirmationToken {
-result: Arc<Mutex<ConfirmationResult>>,
-listeners: Arc<Mutex<Vec<Listener>>>,
+struct ConfirmationSender {
+sender: oneshot::Sender<ConfirmationResult>,
 request: ConfirmationRequest,
 }
 
-pub struct ConfirmationPromise {
-id: U256,
-result: Arc<Mutex<ConfirmationResult>>,
-listeners: Arc<Mutex<Vec<Listener>>>,
-}
-
-impl ConfirmationToken {
-/// Submit solution to all listeners
-fn resolve(&self, result: Option<RpcResult>) {
-let wrapped = result.clone().map_or(ConfirmationResult::Rejected, |h| ConfirmationResult::Confirmed(h));
-{
-let mut res = self.result.lock();
-*res = wrapped.clone();
-}
-// Notify listener
-let listeners = {
-let mut listeners = self.listeners.lock();
-mem::replace(&mut *listeners, Vec::new())
-};
-for mut listener in listeners {
-listener(result.clone());
-}
-}
-
-fn as_promise(&self) -> ConfirmationPromise {
-ConfirmationPromise {
-id: self.request.id,
-result: self.result.clone(),
-listeners: self.listeners.clone(),
-}
-}
-}
-
-impl ConfirmationPromise {
-/// Get the ID for this request.
-pub fn id(&self) -> U256 { self.id }
-
-/// Just get the result, assuming it exists.
-pub fn result(&self) -> ConfirmationResult {
-self.result.lock().clone()
-}
-
-pub fn wait_for_result<F>(self, callback: F) where F: FnOnce(Option<RpcResult>) + Send + 'static {
-trace!(target: "own_tx", "Signer: Awaiting confirmation... ({:?}).", self.id);
-let _result = self.result.lock();
-let mut listeners = self.listeners.lock();
-// TODO [todr] Overcoming FnBox unstability
-let callback = RefCell::new(Some(callback));
-listeners.push(Box::new(move |result| {
-let ref mut f = *callback.borrow_mut();
-f.take().expect("Callbacks are called only once.")(result)
-}));
-}
-}
-
+/// Receiving end of the Confirmation channel; can be used as a `Future` to await for `ConfirmationRequest`
+/// being processed and turned into `ConfirmationOutcome`
+pub type ConfirmationReceiver = oneshot::Receiver<ConfirmationResult>;
 
 /// Queue for all unconfirmed requests.
 #[derive(Default)]
 pub struct ConfirmationsQueue {
 id: Mutex<U256>,
-queue: RwLock<BTreeMap<U256, ConfirmationToken>>,
+queue: RwLock<BTreeMap<U256, ConfirmationSender>>,
 on_event: RwLock<Vec<Box<Fn(QueueEvent) -> () + Send + Sync>>>,
 }
 
@@ -203,24 +134,27 @@ impl ConfirmationsQueue {
 }
 }
 
-/// Removes requests from this queue and notifies `ConfirmationPromise` holders about the result.
+/// Removes requests from this queue and notifies `ConfirmationReceiver` holder about the result.
 /// Notifies also a receiver about that event.
-fn remove(&self, id: U256, result: Option<RpcResult>) -> Option<ConfirmationRequest> {
-let token = self.queue.write().remove(&id);
+fn remove(&self, id: U256, result: Option<ConfirmationResult>) -> Option<ConfirmationRequest> {
+let sender = self.queue.write().remove(&id);
 
-if let Some(token) = token {
+if let Some(sender) = sender {
 // notify receiver about the event
 self.notify(result.clone().map_or_else(
 || QueueEvent::RequestRejected(id),
 |_| QueueEvent::RequestConfirmed(id)
 ));
-// notify token holders about resolution
-token.resolve(result);
-// return a result
-return Some(token.request.clone());
-}
+// notify confirmation receiver about resolution
+let result = result.ok_or(errors::request_rejected());
+sender.sender.send(result);
+
+Some(sender.request)
+} else {
 None
 }
+}
 }
 
 impl Drop for ConfirmationsQueue {
@@ -230,7 +164,7 @@ impl Drop for ConfirmationsQueue {
 }
 
 impl SigningQueue for ConfirmationsQueue {
-fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<ConfirmationPromise, QueueAddError> {
+fn add_request(&self, request: ConfirmationPayload, origin: Origin) -> Result<(U256, ConfirmationReceiver), QueueAddError> {
 if self.len() > QUEUE_LIMIT {
 return Err(QueueAddError::LimitReached);
 }
@@ -247,16 +181,17 @@ impl SigningQueue for ConfirmationsQueue {
 trace!(target: "own_tx", "Signer: ({:?}) : {:?}", id, request);
 
 let mut queue = self.queue.write();
-queue.insert(id, ConfirmationToken {
-result: Arc::new(Mutex::new(ConfirmationResult::Waiting)),
-listeners: Default::default(),
+let (sender, receiver) = oneshot::oneshot::<ConfirmationResult>();
+queue.insert(id, ConfirmationSender {
+sender,
 request: ConfirmationRequest {
-id: id,
+id,
 payload: request,
-origin: origin,
+origin,
 },
 });
-queue.get(&id).map(|token| token.as_promise()).expect("Token was just inserted.")
+(id, receiver)
 };
 // Notify listeners
 self.notify(QueueEvent::NewRequest(id));
@@ -264,7 +199,7 @@ impl SigningQueue for ConfirmationsQueue {
 }
 
 fn peek(&self, id: &U256) -> Option<ConfirmationRequest> {
-self.queue.read().get(id).map(|token| token.request.clone())
+self.queue.read().get(id).map(|sender| sender.request.clone())
 }
 
 fn request_rejected(&self, id: U256) -> Option<ConfirmationRequest> {
@@ -272,14 +207,14 @@ impl SigningQueue for ConfirmationsQueue {
 self.remove(id, None)
 }
 
-fn request_confirmed(&self, id: U256, result: RpcResult) -> Option<ConfirmationRequest> {
+fn request_confirmed(&self, id: U256, result: ConfirmationResult) -> Option<ConfirmationRequest> {
 debug!(target: "own_tx", "Signer: Transaction confirmed ({:?}).", id);
 self.remove(id, Some(result))
 }
 
 fn requests(&self) -> Vec<ConfirmationRequest> {
 let queue = self.queue.read();
-queue.values().map(|token| token.request.clone()).collect()
+queue.values().map(|sender| sender.request.clone()).collect()
 }
 
 fn len(&self) -> usize {
@@ -296,13 +231,14 @@ impl SigningQueue for ConfirmationsQueue {
 
 #[cfg(test)]
 mod test {
-use std::time::Duration;
-use std::thread;
-use std::sync::{mpsc, Arc};
+use std::sync::Arc;
 use bigint::prelude::U256;
 use util::Address;
 use parking_lot::Mutex;
-use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload};
+use jsonrpc_core::futures::Future;
+use v1::helpers::{
+SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload,
+};
 use v1::types::ConfirmationResponse;
 
 fn request() -> ConfirmationPayload {
@@ -326,25 +262,12 @@ mod test {
 let request = request();
 
 // when
-let q = queue.clone();
-let handle = thread::spawn(move || {
-let v = q.add_request(request, Default::default()).unwrap();
-let (tx, rx) = mpsc::channel();
-v.wait_for_result(move |res| {
-tx.send(res).unwrap();
-});
-rx.recv().unwrap().expect("Should return hash")
-});
-
-let id = U256::from(1);
-while queue.peek(&id).is_none() {
-// Just wait for the other thread to start
-thread::sleep(Duration::from_millis(100));
-}
+let (id, future) = queue.add_request(request, Default::default()).unwrap();
 queue.request_confirmed(id, Ok(ConfirmationResponse::SendTransaction(1.into())));
 
 // then
-assert_eq!(handle.join().expect("Thread should finish nicely"), Ok(ConfirmationResponse::SendTransaction(1.into())));
+let confirmation = future.wait().unwrap();
+assert_eq!(confirmation, Ok(ConfirmationResponse::SendTransaction(1.into())));
 }
 
 #[test]
@@ -359,7 +282,7 @@ mod test {
 queue.on_event(move |notification| {
 r.lock().push(notification);
 });
-queue.add_request(request, Default::default()).unwrap();
+let _future = queue.add_request(request, Default::default()).unwrap();
 queue.finish();
 
 // then
@@ -376,7 +299,7 @@ mod test {
 let request = request();
 
 // when
-queue.add_request(request.clone(), Default::default()).unwrap();
+let _future = queue.add_request(request.clone(), Default::default()).unwrap();
 let all = queue.requests();
 
 // then
@@ -24,12 +24,12 @@ use parking_lot::Mutex;
 use ethcore::account_provider::AccountProvider;
 
 use jsonrpc_core::{BoxFuture, Error};
-use jsonrpc_core::futures::{future, Future};
+use jsonrpc_core::futures::{future, Future, Poll, Async};
 use jsonrpc_core::futures::future::Either;
 use v1::helpers::{
-errors, oneshot,
-DefaultAccount,
-SIGNING_QUEUE_LIMIT, SigningQueue, ConfirmationPromise, ConfirmationResult, SignerService,
+errors, DefaultAccount, SignerService, SigningQueue,
+ConfirmationReceiver as RpcConfirmationReceiver,
+ConfirmationResult as RpcConfirmationResult,
 };
 use v1::helpers::dispatch::{self, Dispatcher};
 use v1::helpers::accounts::unwrap_provider;
@@ -45,61 +45,67 @@ use v1::types::{
 Origin,
 };
 
+use parity_reactor::Remote;
+
 /// After 60s entries that are not queried with `check_request` will get garbage collected.
 const MAX_PENDING_DURATION_SEC: u32 = 60;
-/// Max number of total requests pending and completed, before we start garbage collecting them.
-const MAX_TOTAL_REQUESTS: usize = SIGNING_QUEUE_LIMIT;
 
+#[must_use = "futures do nothing unless polled"]
 enum DispatchResult {
-Promise(ConfirmationPromise),
+Future(U256, RpcConfirmationReceiver),
 Value(RpcConfirmationResponse),
 }
 
+impl Future for DispatchResult {
+type Item = RpcConfirmationResponse;
+type Error = Error;
+
+fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+match *self {
+DispatchResult::Value(ref response) => Ok(Async::Ready(response.clone())),
+DispatchResult::Future(_uid, ref mut future) => try_ready!(future.poll()).map(Async::Ready),
+}
+}
+}
+
+fn schedule(remote: Remote,
+confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
+id: U256,
+future: RpcConfirmationReceiver) {
+{
+let mut confirmations = confirmations.lock();
+confirmations.insert(id.clone(), None);
+}
+
+let future = future.then(move |result| {
+let mut confirmations = confirmations.lock();
+confirmations.prune();
+let result = result.and_then(|response| response);
+confirmations.insert(id, Some(result));
+Ok(())
+});
+remote.spawn(future);
+}
+
 /// Implementation of functions that require signing when no trusted signer is used.
 pub struct SigningQueueClient<D> {
 signer: Arc<SignerService>,
 accounts: Option<Arc<AccountProvider>>,
 dispatcher: D,
-pending: Arc<Mutex<TransientHashMap<U256, ConfirmationPromise>>>,
-}
-
-fn handle_dispatch<OnResponse>(res: Result<DispatchResult, Error>, on_response: OnResponse)
-where OnResponse: FnOnce(Result<RpcConfirmationResponse, Error>) + Send + 'static
-{
-match res {
-Ok(DispatchResult::Value(result)) => on_response(Ok(result)),
-Ok(DispatchResult::Promise(promise)) => {
-promise.wait_for_result(move |result| {
-on_response(result.unwrap_or_else(|| Err(errors::request_rejected())))
-})
-},
-Err(e) => on_response(Err(e)),
-}
-}
-
-fn collect_garbage(map: &mut TransientHashMap<U256, ConfirmationPromise>) {
-map.prune();
-if map.len() > MAX_TOTAL_REQUESTS {
-// Remove all non-waiting entries.
-let non_waiting: Vec<_> = map
-.iter()
-.filter(|&(_, val)| val.result() != ConfirmationResult::Waiting)
-.map(|(key, _)| *key)
-.collect();
-for k in non_waiting {
-map.remove(&k);
-}
-}
+remote: Remote,
+// None here means that the request hasn't yet been confirmed
+confirmations: Arc<Mutex<TransientHashMap<U256, Option<RpcConfirmationResult>>>>,
 }
 
 impl<D: Dispatcher + 'static> SigningQueueClient<D> {
 /// Creates a new signing queue client given shared signing queue.
-pub fn new(signer: &Arc<SignerService>, dispatcher: D, accounts: &Option<Arc<AccountProvider>>) -> Self {
+pub fn new(signer: &Arc<SignerService>, dispatcher: D, remote: Remote, accounts: &Option<Arc<AccountProvider>>) -> Self {
 SigningQueueClient {
 signer: signer.clone(),
 accounts: accounts.clone(),
-dispatcher: dispatcher,
-pending: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
+dispatcher,
+remote,
+confirmations: Arc::new(Mutex::new(TransientHashMap::new(MAX_PENDING_DURATION_SEC))),
 }
 }
 
@@ -126,7 +132,7 @@ impl<D: Dispatcher + 'static> SigningQueueClient<D> {
 } else {
 Either::B(future::done(
 signer.add_request(payload, origin)
-.map(DispatchResult::Promise)
+.map(|(id, future)| DispatchResult::Future(id, future))
 .map_err(|_| errors::request_rejected_limit())
 ))
 }
@@ -144,35 +150,31 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
 }
 
 fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
-let pending = self.pending.clone();
+let remote = self.remote.clone();
+let confirmations = self.confirmations.clone();
 
 Box::new(self.dispatch(
 RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()),
 DefaultAccount::Provided(address.into()),
 meta.origin
 ).map(move |result| match result {
 DispatchResult::Value(v) => RpcEither::Or(v),
-DispatchResult::Promise(promise) => {
-let id = promise.id();
-let mut pending = pending.lock();
-collect_garbage(&mut pending);
-pending.insert(id, promise);
-
+DispatchResult::Future(id, future) => {
+schedule(remote, confirmations, id, future);
 RpcEither::Either(id.into())
 },
 }))
 }
 
 fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
-let pending = self.pending.clone();
-Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin)
-.map(move |result| match result {
-DispatchResult::Value(v) => RpcEither::Or(v),
-DispatchResult::Promise(promise) => {
-let id = promise.id();
-let mut pending = pending.lock();
-collect_garbage(&mut pending);
-pending.insert(id, promise);
-
+let remote = self.remote.clone();
+let confirmations = self.confirmations.clone();
+
+Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin)
+.map(|result| match result {
+DispatchResult::Value(v) => RpcEither::Or(v),
+DispatchResult::Future(id, future) => {
+schedule(remote, confirmations, id, future);
 RpcEither::Either(id.into())
 },
 }))
@@ -180,13 +182,10 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
 
 fn check_request(&self, id: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> {
 let id: U256 = id.into();
-match self.pending.lock().get(&id) {
-Some(ref promise) => match promise.result() {
-ConfirmationResult::Waiting => Ok(None),
-ConfirmationResult::Rejected => Err(errors::request_rejected()),
-ConfirmationResult::Confirmed(rpc_response) => rpc_response.map(Some),
-},
-_ => Err(errors::request_not_found()),
+match self.confirmations.lock().get(&id) {
+None => Err(errors::request_not_found()), // Request info has been dropped, or even never been there
+Some(&None) => Ok(None), // No confirmation yet, request is known, confirmation is pending
+Some(&Some(ref confirmation)) => confirmation.clone().map(Some), // Confirmation is there
 }
 }
 
@@ -197,20 +196,12 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
 meta.origin,
 );
 
-let (ready, p) = oneshot::oneshot();
-
-// when dispatch is complete
-Box::new(res.then(move |res| {
-// register callback via the oneshot sender.
-handle_dispatch(res, move |response| {
+// when dispatch is complete - wait for result and then
+Box::new(res.flatten().and_then(move |response| {
 match response {
-Ok(RpcConfirmationResponse::Decrypt(data)) => ready.send(Ok(data)),
-Err(e) => ready.send(Err(e)),
-e => ready.send(Err(errors::internal("Unexpected result.", e))),
+RpcConfirmationResponse::Decrypt(data) => Ok(data),
+e => Err(errors::internal("Unexpected result.", e)),
 }
-});
-
-p
 }))
 }
 }
@@ -225,18 +216,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
 meta.origin,
 );
 
-let (ready, p) = oneshot::oneshot();
-
-Box::new(res.then(move |res| {
-handle_dispatch(res, move |response| {
+Box::new(res.flatten().and_then(move |response| {
 match response {
-Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)),
-Err(e) => ready.send(Err(e)),
-e => ready.send(Err(errors::internal("Unexpected result.", e))),
+RpcConfirmationResponse::Signature(sig) => Ok(sig),
+e => Err(errors::internal("Unexpected result.", e)),
 }
-});
-
-p
 }))
 }
 
@@ -247,18 +231,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
 meta.origin,
 );
 
-let (ready, p) = oneshot::oneshot();
-
-Box::new(res.then(move |res| {
-handle_dispatch(res, move |response| {
+Box::new(res.flatten().and_then(move |response| {
 match response {
-Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)),
-Err(e) => ready.send(Err(e)),
-e => ready.send(Err(errors::internal("Unexpected result.", e))),
+RpcConfirmationResponse::SendTransaction(hash) => Ok(hash),
+e => Err(errors::internal("Unexpected result.", e)),
 }
-});
-
-p
 }))
 }
 
@@ -269,18 +246,11 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
 meta.origin,
 );
 
-let (ready, p) = oneshot::oneshot();
-
-Box::new(res.then(move |res| {
-handle_dispatch(res, move |response| {
+Box::new(res.flatten().and_then(move |response| {
 match response {
-Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)),
-Err(e) => ready.send(Err(e)),
-e => ready.send(Err(errors::internal("Unexpected result.", e))),
+RpcConfirmationResponse::SignTransaction(tx) => Ok(tx),
+e => Err(errors::internal("Unexpected result.", e)),
 }
-});
-
-p
 }))
 }
 }
@@ -80,7 +80,7 @@ fn signer_tester() -> SignerTester {
 fn should_return_list_of_items_to_confirm() {
 // given
 let tester = signer_tester();
-tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+let _send_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 from: Address::from(1),
 used_default_from: false,
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@@ -91,7 +91,7 @@ fn should_return_list_of_items_to_confirm() {
 nonce: None,
 condition: None,
 }), Origin::Dapps("http://parity.io".into())).unwrap();
-tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap();
+let _sign_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap();
 
 // when
 let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#;
@@ -111,7 +111,7 @@ fn should_return_list_of_items_to_confirm() {
 fn should_reject_transaction_from_queue_without_dispatching() {
 // given
 let tester = signer_tester();
-tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 from: Address::from(1),
 used_default_from: false,
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@@ -138,7 +138,7 @@ fn should_reject_transaction_from_queue_without_dispatching() {
 fn should_not_remove_transaction_if_password_is_invalid() {
 // given
 let tester = signer_tester();
-tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 from: Address::from(1),
 used_default_from: false,
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
@@ -164,7 +164,7 @@ fn should_not_remove_transaction_if_password_is_invalid() {
 fn should_not_remove_sign_if_password_is_invalid() {
 	// given
 	let tester = signer_tester();
-	tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap();
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(0.into(), vec![5].into()), Origin::Unknown).unwrap();
 	assert_eq!(tester.signer.requests().len(), 1);
 
 	// when
@@ -182,7 +182,7 @@ fn should_confirm_transaction_and_dispatch() {
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 		from: address,
 		used_default_from: false,
 		to: Some(recipient),
@@ -228,7 +228,7 @@ fn should_alter_the_sender_and_nonce() {
 	//// given
 	let tester = signer_tester();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 		from: 0.into(),
 		used_default_from: false,
 		to: Some(recipient),
@@ -278,7 +278,7 @@ fn should_confirm_transaction_with_token() {
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 		from: address,
 		used_default_from: false,
 		to: Some(recipient),
@@ -327,7 +327,7 @@ fn should_confirm_transaction_with_rlp() {
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 		from: address,
 		used_default_from: false,
 		to: Some(recipient),
@@ -374,7 +374,7 @@ fn should_return_error_when_sender_does_not_match() {
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
 		from: Address::default(),
 		used_default_from: false,
 		to: Some(recipient),
@@ -421,7 +421,7 @@ fn should_confirm_sign_transaction_with_rlp() {
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
 	let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-	tester.signer.add_request(ConfirmationPayload::SignTransaction(FilledTransactionRequest {
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::SignTransaction(FilledTransactionRequest {
 		from: address,
 		used_default_from: false,
 		to: Some(recipient),
@@ -485,7 +485,7 @@ fn should_confirm_data_sign_with_signature() {
 	// given
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
-	tester.signer.add_request(ConfirmationPayload::EthSignMessage(
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::EthSignMessage(
 		address,
 		vec![1, 2, 3, 4].into(),
 	), Origin::Unknown).unwrap();
@@ -515,7 +515,7 @@ fn should_confirm_decrypt_with_phrase() {
 	// given
 	let tester = signer_tester();
 	let address = tester.accounts.new_account("test").unwrap();
-	tester.signer.add_request(ConfirmationPayload::Decrypt(
+	let _confirmation_future = tester.signer.add_request(ConfirmationPayload::Decrypt(
 		address,
 		vec![1, 2, 3, 4].into(),
 	), Origin::Unknown).unwrap();
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+use std::thread;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
@@ -39,6 +40,8 @@ use ethcore::transaction::{Transaction, Action, SignedTransaction};
 use ethstore::ethkey::{Generator, Random};
 use serde_json;
 
+use parity_reactor::Remote;
+
 struct SigningTester {
 	pub signer: Arc<SignerService>,
 	pub client: Arc<TestBlockChainClient>,
@@ -58,9 +61,11 @@ impl Default for SigningTester {
 
 		let dispatcher = FullDispatcher::new(client.clone(), miner.clone());
 
-		let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), &opt_accounts);
+		let remote = Remote::new_thread_per_future();
+
+		let rpc = SigningQueueClient::new(&signer, dispatcher.clone(), remote.clone(), &opt_accounts);
 		io.extend_with(EthSigning::to_delegate(rpc));
-		let rpc = SigningQueueClient::new(&signer, dispatcher, &opt_accounts);
+		let rpc = SigningQueueClient::new(&signer, dispatcher, remote, &opt_accounts);
 		io.extend_with(ParitySigning::to_delegate(rpc));
 
 		SigningTester {
@@ -184,6 +189,9 @@ fn should_check_status_of_request_when_its_resolved() {
 	tester.io.handle_request_sync(&request).expect("Sent");
 	tester.signer.request_confirmed(1.into(), Ok(ConfirmationResponse::Signature(1.into())));
 
+	// This is not ideal, but we need to give futures some time to be executed, and they need to run in a separate thread
+	thread::sleep(Duration::from_millis(20));
+
 	// when
 	let request = r#"{
 		"jsonrpc": "2.0",
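The tester now constructs each `SigningQueueClient` with a `parity_reactor::Remote` created by `new_thread_per_future()`, so the dispatched confirmation future runs on its own thread instead of inline, and the status test has to pause for 20 ms before the result becomes observable. A minimal std-only analogy of that timing dependency (plain threads instead of the real `Remote`, an `AtomicBool` instead of the signer queue; names are illustrative):

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;

fn main() {
	let confirmed = Arc::new(AtomicBool::new(false));
	let flag = confirmed.clone();

	// Hand the "confirmation future" to its own thread, as a
	// thread-per-future remote would.
	thread::spawn(move || {
		flag.store(true, Ordering::SeqCst);
	});

	// Without this pause the check below could race the worker thread;
	// the 20 ms sleep in the test above guards against the same race.
	thread::sleep(Duration::from_millis(20));
	assert!(confirmed.load(Ordering::SeqCst));
	println!("confirmation observed");
}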
@@ -32,6 +32,7 @@ ethcore-bytes = { path = "bytes" }
 memorydb = { path = "memorydb" }
 util-error = { path = "error" }
 kvdb = { path = "kvdb" }
+journaldb = { path = "journaldb" }
 
 [dev-dependencies]
 kvdb-memorydb = { path = "kvdb-memorydb" }
util/journaldb/Cargo.toml (new file, 23 lines)
@@ -0,0 +1,23 @@
+[package]
+name = "journaldb"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions"
+license = "GPL3"
+
+[dependencies]
+ethcore-bigint = { path = "../bigint", features = ["heapsizeof"] }
+ethcore-bytes = { path = "../bytes" }
+hashdb = { path = "../hashdb" }
+heapsize = "0.4"
+kvdb = { path = "../kvdb" }
+log = "0.3"
+memorydb = { path = "../memorydb" }
+parking_lot = "0.4"
+rlp = { path = "../rlp" }
+util-error = { path = "../error" }
+
+[dev-dependencies]
+ethcore-logger = { path = "../../logger" }
+hash = { path = "../hash" }
+kvdb-memorydb = { path = "../kvdb-memorydb" }
@@ -21,9 +21,9 @@ use std::collections::hash_map::Entry;
 use std::sync::Arc;
 use rlp::*;
 use hashdb::*;
-use super::super::memorydb::*;
+use super::memorydb::*;
 use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
-use super::traits::JournalDB;
+use traits::JournalDB;
 use kvdb::{KeyValueDB, DBTransaction};
 use bigint::hash::H256;
 use error::{BaseDataError, UtilError};
@@ -202,8 +202,7 @@ mod tests {
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
 	use super::*;
-	use journaldb::traits::JournalDB;
-	use kvdb_memorydb;
+	use {kvdb_memorydb, JournalDB};
 
 	#[test]
 	fn insert_same_in_fork() {
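The test modules of the extracted crate now import straight from the crate root: `use {kvdb_memorydb, JournalDB};` pulls in the test-only `extern crate kvdb_memorydb` and the `JournalDB` trait re-exported from `lib.rs` in a single line. The sketch below shows the same resolution rule in one self-contained file; it assumes the pre-2018 edition this codebase uses (compile with `rustc --edition 2015`, which is also rustc's default), and `kvdb_memorydb_stub` is a local stand-in for the real crate:

// Sketch only: 2015-edition path resolution, with a local module standing in
// for the kvdb-memorydb dev-dependency.

pub trait JournalDB {
	fn is_empty(&self) -> bool;
}

mod kvdb_memorydb_stub {
	// Stand-in for kvdb_memorydb::create(columns).
	pub fn create(_columns: u32) -> Vec<u8> {
		Vec::new()
	}
}

mod tests_like {
	// In the 2015 edition both names resolve from the crate root, exactly like
	// `use {kvdb_memorydb, JournalDB};` in the real test modules.
	use {kvdb_memorydb_stub, JournalDB};

	pub struct Dummy;

	impl JournalDB for Dummy {
		fn is_empty(&self) -> bool {
			kvdb_memorydb_stub::create(0).is_empty()
		}
	}
}

fn main() {
	use tests_like::Dummy;
	println!("empty journal: {}", Dummy.is_empty());
}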
@@ -16,6 +16,26 @@
 
 //! `JournalDB` interface and implementation.
 
+extern crate heapsize;
+#[macro_use]
+extern crate log;
+
+extern crate ethcore_bigint as bigint;
+extern crate ethcore_bytes as bytes;
+extern crate hashdb;
+extern crate kvdb;
+extern crate memorydb;
+extern crate parking_lot;
+extern crate rlp;
+extern crate util_error as error;
+
+#[cfg(test)]
+extern crate ethcore_logger;
+#[cfg(test)]
+extern crate hash as keccak;
+#[cfg(test)]
+extern crate kvdb_memorydb;
+
 use std::{fmt, str};
 use std::sync::Arc;
 
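The new crate's `lib.rs` declares its dependencies with pre-2018 `extern crate` items, including `#[macro_use] extern crate log;`, which is what makes the logging macros usable throughout the crate, plus `#[cfg(test)]` crates that only the test modules need. A minimal sketch of the macro-import pattern, assuming a `log = "0.3"` dependency as in the manifest above (the function name and log target are illustrative only):

// Assumes `log = "0.3"` in Cargo.toml, matching the manifest added above.
#[macro_use]
extern crate log;

fn journal_under(era: u64, id: &str) {
	// `trace!`/`warn!` come from the macro import at the crate root.
	trace!(target: "journaldb", "journalling era {} (id {})", era, id);
}

fn main() {
	journal_under(42, "0xabc");
	warn!("no logger configured; this message is discarded");
}

Without an installed logger these calls are no-ops, which is also why the crate keeps `ethcore-logger` as a dev-dependency for its tests (`init_log` shows up in the test imports below).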
@@ -26,6 +46,8 @@ mod earlymergedb;
 mod overlayrecentdb;
 mod refcounteddb;
 
+pub mod overlaydb;
+
 /// Export the `JournalDB` trait.
 pub use self::traits::JournalDB;
 
@@ -459,8 +459,7 @@ mod tests {
 	use super::*;
 	use hashdb::{HashDB, DBValue};
 	use ethcore_logger::init_log;
-	use journaldb::JournalDB;
-	use kvdb_memorydb;
+	use {kvdb_memorydb, JournalDB};
 
 	fn new_db() -> OverlayRecentDB {
 		let backing = Arc::new(kvdb_memorydb::create(0));
@@ -210,9 +210,8 @@ mod tests {
 
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
-	use kvdb_memorydb;
 	use super::*;
-	use super::super::traits::JournalDB;
+	use {JournalDB, kvdb_memorydb};
 
 	fn new_db() -> RefCountedDB {
 		let backing = Arc::new(kvdb_memorydb::create(0));
@@ -112,18 +112,12 @@ extern crate util_error as error;
 #[cfg(test)]
 extern crate kvdb_memorydb;
 
-#[macro_use]
-extern crate log as rlog;
 
 pub mod misc;
-pub mod overlaydb;
-pub mod journaldb;
 
 pub use misc::*;
 pub use hashdb::*;
 pub use memorydb::MemoryDB;
-pub use overlaydb::*;
-pub use journaldb::JournalDB;
 
 /// 160-bit integer representing account address
 pub type Address = bigint::hash::H160;